/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
34
* ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
35
* @hw: pointer to hardware structure
36
* @rx_pba: method to distribute packet buffer
38
* Configure packet buffers for DCB mode.
40
static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
42
int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
43
u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
50
* This really means configure the first half of the TCs
51
* (Traffic Classes) to use 5/8 of the Rx packet buffer
52
* space. To determine the size of the buffer for each TC,
53
* we are multiplying the average size by 5/4 and applying
54
* it to half of the traffic classes.
56
if (rx_pba == pba_80_48) {
57
rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
58
rx_pb_size -= rxpktsize * (num_tcs / 2);
59
for (; i < (num_tcs / 2); i++)
60
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
63
/* Divide the remaining Rx packet buffer evenly among the TCs */
64
rxpktsize = rx_pb_size / (num_tcs - i);
65
for (; i < num_tcs; i++)
66
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
69
* Setup Tx packet buffer and threshold equally for all TCs
70
* TXPBTHRESH register is set in K so divide by 1024 and subtract
71
* 10 since the largest packet we support is just over 9K.
73
txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
74
txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
75
for (i = 0; i < num_tcs; i++) {
76
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
77
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
80
/* Clear unused TCs, if any, to zero buffer size*/
81
for (; i < MAX_TRAFFIC_CLASS; i++) {
82
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
83
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
84
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
91
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
92
* @hw: pointer to hardware structure
93
* @refill: refill credits index by traffic class
94
* @max: max credits index by traffic class
95
* @bwg_id: bandwidth grouping indexed by traffic class
96
* @prio_type: priority type indexed by traffic class
98
* Configure Rx Packet Arbiter and credits for each traffic class.
100
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
108
u32 credit_refill = 0;
113
* Disable the arbiter before changing parameters
114
* (always enable recycle mode; WSP)
116
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
117
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
119
/* Map all traffic classes to their UP, 1 to 1 */
121
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
122
reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
123
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
125
/* Configure traffic class credits and priority */
126
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
127
credit_refill = refill[i];
129
reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
131
reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
133
if (prio_type[i] == prio_link)
134
reg |= IXGBE_RTRPT4C_LSP;
136
IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
140
* Configure Rx packet plane (recycle mode; WSP) and
143
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
144
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
150
* ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
151
* @hw: pointer to hardware structure
152
* @refill: refill credits index by traffic class
153
* @max: max credits index by traffic class
154
* @bwg_id: bandwidth grouping indexed by traffic class
155
* @prio_type: priority type indexed by traffic class
157
* Configure Tx Descriptor Arbiter and credits for each traffic class.
159
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
165
u32 reg, max_credits;
168
/* Clear the per-Tx queue credits; we use per-TC instead */
169
for (i = 0; i < 128; i++) {
170
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
171
IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
174
/* Configure traffic class credits and priority */
175
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
176
max_credits = max[i];
177
reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
179
reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
181
if (prio_type[i] == prio_group)
182
reg |= IXGBE_RTTDT2C_GSP;
184
if (prio_type[i] == prio_link)
185
reg |= IXGBE_RTTDT2C_LSP;
187
IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
191
* Configure Tx descriptor plane (recycle mode; WSP) and
194
reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
195
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
201
* ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
202
* @hw: pointer to hardware structure
203
* @refill: refill credits index by traffic class
204
* @max: max credits index by traffic class
205
* @bwg_id: bandwidth grouping indexed by traffic class
206
* @prio_type: priority type indexed by traffic class
208
* Configure Tx Packet Arbiter and credits for each traffic class.
210
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
221
* Disable the arbiter before changing parameters
222
* (always enable recycle mode; SP; arb delay)
224
reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
225
(IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
227
IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
229
/* Map all traffic classes to their UP, 1 to 1 */
231
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
232
reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
233
IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
235
/* Configure traffic class credits and priority */
236
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
238
reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
239
reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
241
if (prio_type[i] == prio_group)
242
reg |= IXGBE_RTTPT2C_GSP;
244
if (prio_type[i] == prio_link)
245
reg |= IXGBE_RTTPT2C_LSP;
247
IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
251
* Configure Tx packet plane (recycle mode; SP; arb delay) and
254
reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
255
(IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
256
IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
262
* ixgbe_dcb_config_pfc_82599 - Configure priority flow control
263
* @hw: pointer to hardware structure
264
* @pfc_en: enabled pfc bitmask
266
* Configure Priority Flow Control (PFC) for each traffic class.
268
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
270
u32 i, reg, rx_pba_size;
272
/* Configure PFC Tx thresholds per TC */
273
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
274
int enabled = pfc_en & (1 << i);
275
rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
276
rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
278
reg = (rx_pba_size - hw->fc.low_water) << 10;
281
reg |= IXGBE_FCRTL_XONE;
282
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
284
reg = (rx_pba_size - hw->fc.high_water) << 10;
286
reg |= IXGBE_FCRTH_FCEN;
287
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
291
/* Configure pause time (2 TCs per register) */
292
reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
293
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
294
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
296
/* Configure flow control refresh threshold value */
297
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
300
reg = IXGBE_FCCFG_TFCE_PRIORITY;
301
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
304
* 82599 will always honor XOFF frames we receive when
305
* we are in PFC mode however X540 only honors enabled
308
reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
309
reg &= ~IXGBE_MFLCN_RFCE;
310
reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
312
if (hw->mac.type == ixgbe_mac_X540)
313
reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
315
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
318
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
319
hw->mac.ops.fc_enable(hw, i);
326
* ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
327
* @hw: pointer to hardware structure
329
* Configure queue statistics registers, all queues belonging to same traffic
330
* class uses a single set of queue statistics counters.
332
static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
338
* Receive Queues stats setting
339
* 32 RQSMR registers, each configuring 4 queues.
340
* Set all 16 queues of each TC to the same stat
341
* with TC 'n' going to stat 'n'.
343
for (i = 0; i < 32; i++) {
344
reg = 0x01010101 * (i / 4);
345
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
348
* Transmit Queues stats setting
349
* 32 TQSM registers, each controlling 4 queues.
350
* Set all queues of each TC to the same stat
351
* with TC 'n' going to stat 'n'.
352
* Tx queues are allocated non-uniformly to TCs:
353
* 32, 32, 16, 16, 8, 8, 8, 8.
355
for (i = 0; i < 32; i++) {
372
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
379
* ixgbe_dcb_config_82599 - Configure general DCB parameters
380
* @hw: pointer to hardware structure
382
* Configure general DCB parameters.
384
static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
389
/* Disable the Tx desc arbiter so that MTQC can be changed */
390
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
391
reg |= IXGBE_RTTDCS_ARBDIS;
392
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
394
/* Enable DCB for Rx with 8 TCs */
395
reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
396
switch (reg & IXGBE_MRQC_MRQE_MASK) {
398
case IXGBE_MRQC_RT4TCEN:
399
/* RSS disabled cases */
400
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
402
case IXGBE_MRQC_RSSEN:
403
case IXGBE_MRQC_RTRSS4TCEN:
404
/* RSS enabled cases */
405
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
408
/* Unsupported value, assume stale data, overwrite no RSS */
409
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
411
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
413
/* Enable DCB for Tx with 8 TCs */
414
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
415
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
417
/* Disable drop for all queues */
418
for (q = 0; q < 128; q++)
419
IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);
421
/* Enable the Tx desc arbiter */
422
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
423
reg &= ~IXGBE_RTTDCS_ARBDIS;
424
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
426
/* Enable Security TX Buffer IFG for DCB */
427
reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
428
reg |= IXGBE_SECTX_DCB;
429
IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
435
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
436
* @hw: pointer to hardware structure
437
* @rx_pba: method to distribute packet buffer
438
* @refill: refill credits index by traffic class
439
* @max: max credits index by traffic class
440
* @bwg_id: bandwidth grouping indexed by traffic class
441
* @prio_type: priority type indexed by traffic class
442
* @pfc_en: enabled pfc bitmask
444
* Configure dcb settings and enable dcb mode.
446
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
447
u8 rx_pba, u8 pfc_en, u16 *refill,
448
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
450
ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
451
ixgbe_dcb_config_82599(hw);
452
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
454
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
456
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
457
bwg_id, prio_type, prio_tc);
458
ixgbe_dcb_config_pfc_82599(hw, pfc_en);
459
ixgbe_dcb_config_tc_stats_82599(hw);