/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

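/*
 * The scheduler byte-count table entries below include these two 4-byte
 * overheads (CRC and Tx delimiter) on top of the frame payload; see
 * iwl_trans_txq_update_byte_cnt_tbl().
 */
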
/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

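/*
 * Note: the byte-count table holds TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP
 * entries per queue; entries for the first TFD_QUEUE_SIZE_BC_DUP indexes are
 * mirrored past the end of the ring (above and in
 * iwlagn_txq_inval_byte_cnt_tbl()), presumably so the scheduler can read
 * ahead across the ring wrap without special-casing it.
 */
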
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}

	txq->need_update = 0;
}

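/*
 * Each TFD holds up to IWL_NUM_OF_TBS buffer descriptors ("TBs").  A TB packs
 * a 36-bit DMA address and a 12-bit length: the low 32 address bits go in
 * tb->lo, and tb->hi_n_len carries the top 4 address bits in its low nibble
 * with the length in the upper 12 bits.  The helpers below encode and decode
 * that layout.
 */
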
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

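/*
 * TB 0 always holds the bidirectionally mapped command/metadata buffer whose
 * mapping is tracked in struct iwl_cmd_meta, which is why it is unmapped
 * through meta above while the payload chunks are unmapped starting from
 * index 1.
 */
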
/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_free_skb(priv(trans), skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd *tfd, *tfd_tmp = txq->tfds;
	u32 num_tbs;

	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

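/*
 * Typical usage (see iwl_enqueue_hcmd() below): the first chunk of a command
 * or frame is attached with reset = 1 to start from a cleared TFD, and any
 * further chunks are attached with reset = 0 so they are appended as
 * additional TBs of the same TFD.
 */
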
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

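/*
 * Illustrative only: a queue is typically initialized with the full ring size
 * and a matching number of driver slots, e.g.
 *
 *	iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 *
 * where slots_num is presumably TFD_TX_CMD_SLOTS for data queues and the
 * smaller TFD_CMD_SLOTS for the command queue (see the queue allocation path
 * in this transport).
 */
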
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(trans, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
			     " queue\n", tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	return 0;
}

void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int read_ptr, write_ptr;
	struct iwl_tid_data *tid_data;
	int txq_id;

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return -EINVAL;
	}

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(trans, "Stopping AGG while state not ON "
			"or starting for %d on %d (%d)\n", sta_id, tid,
			trans->shrd->tid_data[sta_id][tid].agg.state);
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return 0;
	}

	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
		trans->shrd->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(trans, "HW queue is empty\n");
turn_off:
	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&trans->shrd->sta_lock);
	spin_lock(&trans->shrd->lock);

	iwl_trans_pcie_txq_agg_disable(trans, txq_id);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
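/*
 * Command payload chunks are handled in two ways below: chunks without
 * IWL_HCMD_DFL_NOCOPY are copied into the (DMA-mapped) command buffer right
 * after the header and share its single TB, while NOCOPY chunks are mapped
 * individually and attached as additional TBs of the same TFD.
 */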
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv(trans), false);
		}

	}
}

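/*
 * For the command queue each completion is expected to free exactly one
 * entry (commands complete in order), so hitting the nfreed > 0 branch above
 * means the driver and firmware have lost sync on the queue indexes, which
 * is treated as a firmware error.
 */
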
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		meta->source->handler_status = handler_status;
		rxb->page = NULL;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans->shrd->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				       " %s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

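/*
 * Illustrative usage sketch only (not taken from this file): a typical
 * synchronous caller fills a struct iwl_host_cmd on the stack and lets the
 * function above route it, e.g.
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_ECHO,
 *		.flags = CMD_SYNC,
 *	};
 *
 *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *
 * Callers that set CMD_WANT_SKB are expected to free cmd.reply_page once they
 * are done with the response.
 */
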
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}