	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* Stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		/* common ramrods consume an EQ credit, ethernet ramrods a CQ
		 * credit (see the "(ETH, COMMON)" debug print below)
		 */
		if (common)
			atomic_dec(&bp->eq_spq_left);
		else
			atomic_dec(&bp->cq_spq_left);
	}

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type,
	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
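/*
 * Illustrative sketch (not driver code): the atomic_dec() above relies on
 * spin_lock_bh()/spin_unlock_bh() acting as acquire/release barriers, so
 * the decrement is guaranteed to be visible to the next owner of the lock
 * and no extra smp_mb() is needed:
 */
static void example_counted_post(spinlock_t *lock, atomic_t *left)
{
	spin_lock_bh(lock);	/* acquire: earlier critical sections visible */
	atomic_dec(left);	/* may reach memory any time before unlock */
	spin_unlock_bh(lock);	/* release: decrement visible to next owner */
}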
void bnx2x_free_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, \
					  (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

	bnx2x_gunzip_end(bp);

	/* fastpath: the per-queue status blocks and rx/tx rings (the FCoE
	 * client uses the default status block) are released in
	 * bnx2x_free_fp_mem()
	 */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	/* ... other slowpath blocks are freed here in the full function ... */

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_FREE(bp->rx_indir_table);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
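/*
 * Sketch (illustrative): because the free macros above NULL-check and then
 * clear their argument, freeing is idempotent - a second
 * BNX2X_FREE(bp->rx_indir_table) after the first is a no-op - which is what
 * makes bnx2x_free_mem() safe to call on a partially-allocated bp from the
 * allocation error path.
 */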
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
int bnx2x_alloc_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common: status blocks (the FCoE client uses the default one) */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);

		bnx2x_fp(bp, i, bp) = bp;

		if (!IS_FCOE_IDX(i)) {
			if (CHIP_IS_E2(bp))
				BNX2X_PCI_ALLOC(sb->e2_sb,
					&bnx2x_fp(bp, i, status_blk_mapping),
					sizeof(struct host_hc_status_block_e2));
			else
				BNX2X_PCI_ALLOC(sb->e1x_sb,
					&bnx2x_fp(bp, i, status_blk_mapping),
					sizeof(struct host_hc_status_block_e1x));
		}
		set_sb_shortcuts(bp, i);
	}

	/* Rx */
	for_each_queue(bp, i) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	for_each_queue(bp, i) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	if (bnx2x_gunzip_init(bp))
		return -ENOMEM;

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));
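	/*
	 * Error-handling sketch (illustrative): every BNX2X_ALLOC /
	 * BNX2X_PCI_ALLOC failure jumps to the single alloc_mem_err label
	 * at the end of this function, which unwinds with:
	 *
	 *	alloc_mem_err:
	 *		bnx2x_free_mem(bp);
	 *		return -ENOMEM;
	 *
	 * The guarded free macros make freeing a partially-built bp safe,
	 * so one label suffices no matter which allocation failed.
	 */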
bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
{
	return CHIP_REV_IS_SLOW(bp) ?
		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
}

/* set mc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 */
static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
		return -EINVAL;

	netdev_for_each_mc_addr(ha, dev) {
		/* copy the MAC into the CAM entry as three swapped words */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		i++;
	}

	/* invalidate entries left over from a previously longer list */
	old = config_cmd->hdr.length;
	for (; i < old; i++) {
		if (CAM_IS_INVALID(config_cmd->config_table[i])) {
			/* already invalidated */
			break;
		}
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
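/*
 * MAC packing sketch (hypothetical helper, not driver code): each CAM
 * entry stores the address as three 16-bit words, byte-swapped per word.
 * On a little-endian host, 00:11:22:33:44:55 packs as 0x0011, 0x2233,
 * 0x4455:
 */
static void example_pack_mac(const u8 *mac, u16 out[3])
{
	out[0] = swab16(*(const u16 *)&mac[0]);	/* msb:    0x0011 */
	out[1] = swab16(*(const u16 *)&mac[2]);	/* middle: 0x2233 */
	out[2] = swab16(*(const u16 *)&mac[4]);	/* lsb:    0x4455 */
}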
void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);

	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for the ramrod completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
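/*
 * Synchronization note (inferred from the two paths above): hdr.echo acts
 * as a completion-reporting flag for the SET_MAC ramrod. With echo == 0
 * (the incremental set path) the completion is silent; with echo == 1 the
 * completion handler clears bp->set_mac_pending, which bnx2x_wait_ramrod()
 * polls, giving the invalidate path blocking semantics.
 */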
/* Accept one or more multicasts */
static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
{
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	u32 mc_filter[MC_HASH_SIZE];
	u32 crc, bit, regidx;
	int i;

	memset(mc_filter, 0, 4 * MC_HASH_SIZE);

	netdev_for_each_mc_addr(ha, dev) {
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));

		crc = crc32c_le(0, bnx2x_mc_addr(ha),
				ETH_ALEN);
		bit = (crc >> 24) & 0xff;
		regidx = bit >> 5;
		bit &= 0x1f;
		mc_filter[regidx] |= (1 << bit);
	}

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i),
		       mc_filter[i]);

	return 0;
}

void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}
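/*
 * Worked example (illustrative): the 256-bit multicast hash is held in
 * eight 32-bit registers, and the top byte of the crc32c selects one bit.
 * If crc32c_le(0, mac, ETH_ALEN) == 0xA7xxxxxx, then bit 0xA7 = 167 is
 * set: regidx = 167 >> 5 = 5, bit = 167 & 0x1f = 7, so
 * mc_filter[5] |= 1 << 7. A frame whose destination hashes to a set bit
 * is accepted; unrelated addresses may collide on the same bit, which is
 * why hash filtering is only approximate.
 */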
#ifdef BCM_CNIC
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 * @set:	set or clear the CAM entry
 *
 * Sets the iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
			       cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

	return 0;
}

/**
 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
 *
 * @bp:		driver handle
 * @set:	set or clear the CAM entry
 *
 * Sets the FCoE L2 MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion returns.
 * Returns 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
	msleep(MCP_ONE_TIMEOUT);
}

/*
 * Initializes bp->common.shmem_base and waits for the validity signature
 * to appear.
 */
static int bnx2x_init_shmem(struct bnx2x *bp)
{
	int cnt = 0;
	u32 val = 0;

	do {
		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
		if (bp->common.shmem_base) {
			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
			if (val & SHR_MEM_VALIDITY_MB)
				return 0;
		}

		bnx2x_mcp_wait_one(bp);

	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

	BNX2X_ERR("BAD MCP validity signature\n");

	return -ENODEV;
}

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	int rc = bnx2x_init_shmem(bp);

	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
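/*
 * Timing note (constants defined elsewhere in the driver): the do/while in
 * bnx2x_init_shmem() retries MCP_TIMEOUT / MCP_ONE_TIMEOUT times and each
 * bnx2x_mcp_wait_one() sleeps MCP_ONE_TIMEOUT ms, so the total wait for
 * the validity signature is bounded by roughly MCP_TIMEOUT ms.
 */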
		       bp->common.shmem2_base);
}

static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
{
	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_fcoe_conn);

	/* Get the number of maximum allowed iSCSI and FCoE connections */
	bp->cnic_eth_dev.max_iscsi_conn =
		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	bp->cnic_eth_dev.max_fcoe_conn =
		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
		       bp->cnic_eth_dev.max_iscsi_conn,
		       bp->cnic_eth_dev.max_fcoe_conn);

	/* If the maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_iscsi_conn)
		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;

	if (!bp->cnic_eth_dev.max_fcoe_conn)
		bp->flags |= NO_FCOE_FLAG;
}
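/*
 * Decoding note: the license values live in shmem XOR-ed with
 * FW_ENCODE_32BIT_PATTERN, so XOR-ing again recovers the plain value
 * (x ^ k ^ k == x); the mask/shift pair then extracts the
 * connection-count field from the decoded word.
 */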
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
	u8 *fip_mac = bp->fip_mac;

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
#define E1_MAX_UC_LIST	29
#define E1H_MAX_UC_LIST	30
#define E2_MAX_UC_LIST	14

static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		return E1_MAX_UC_LIST;
	else if (CHIP_IS_E1H(bp))
		return E1H_MAX_UC_LIST;
	else
		return E2_MAX_UC_LIST;
}

static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp)) {
		/* CAM Entries for Port0:
		 *      0 - prim ETH MAC
		 *      1 - BCAST MAC
		 *      2 - iSCSI L2 ring ETH MAC
		 *      3-31 - UC MACs
		 *
		 * Port1 entries are allocated the same way starting from
		 * entry 32.
		 */
		return 3 + 32 * BP_PORT(bp);
	} else if (CHIP_IS_E1H(bp)) {
		/* CAM Entries:
		 * 0-7    - prim ETH MAC for each function
		 * 8-15   - iSCSI L2 ring ETH MAC for each function
		 * 16-255 - UC MAC lists for each function
		 *
		 * Remark: There is no FCoE support for E1H, thus FCoE related
		 *         MACs are not considered.
		 */
		return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
			bnx2x_max_uc_list(bp) * BP_FUNC(bp);
	} else {
		/* CAM Entries (there is a separate CAM per engine):
		 * 0-3   - prim ETH MAC for each function
		 * 4-7   - iSCSI L2 ring ETH MAC for each function
		 * 8-11  - FIP ucast L2 MAC for each function
		 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
		 * 16-71 - UC MAC lists for each function
		 */
		u8 func_idx =
			(CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));

		return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
			bnx2x_max_uc_list(bp) * func_idx;
	}
}
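/*
 * Worked example (constant values assumed from the layout above): on E1H,
 * E1H_FUNC_MAX = 8 and CAM_ISCSI_ETH_LINE + 1 = 2, so the UC region starts
 * at entry 8 * 2 = 16; function 2 then starts at 16 + 30 * 2 = 76 and owns
 * CAM entries 76..105.
 */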
/* set uc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);

	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
		return -EINVAL;

	netdev_for_each_uc_addr(ha, dev) {
		/* copy the MAC into the CAM entry as three swapped words */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);

		/* Set uc MAC in NIG */
		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
				     LLH_CAM_ETH_LINE + i);
		i++;
	}

	/* invalidate entries left over from a previously longer list */
	old = config_cmd->hdr.length;
	for (; i < old; i++) {
		if (CAM_IS_INVALID(config_cmd->
				   config_table[i])) {
			/* already invalidated */
			break;
		}
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
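/*
 * Example of the incremental update above (illustrative): if the previous
 * ramrod programmed 5 addresses (hdr.length == 5) and the new list holds
 * 3, entries 0-2 are rewritten with T_ETH_MAC_COMMAND_SET and entries 3-4
 * are flagged T_ETH_MAC_COMMAND_INVALIDATE, so a single ramrod both sets
 * the new list and prunes the stale tail.
 */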
void bnx2x_invalidate_uc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	u8 max_list_size = bnx2x_max_uc_list(bp);

	for (i = 0; i < max_list_size; i++) {
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
	}

	config_cmd->hdr.length = max_list_size;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);
}
static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
	/* some multicasts */
	if (CHIP_IS_E1(bp)) {
		return bnx2x_set_e1_mc_list(bp);
	} else { /* E1H and newer */
		return bnx2x_set_e1h_mc_list(bp);
	}
}
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if (dev->flags & IFF_ALLMULTI)
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else {
		/* some multicasts */
		if (bnx2x_set_mc_list(bp))
			rx_mode = BNX2X_RX_MODE_ALLMULTI;

		/* some unicasts */
		if (bnx2x_set_uc_list(bp))
			rx_mode = BNX2X_RX_MODE_PROMISC;
	}

	bp->rx_mode = rx_mode;
	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
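	/*
	 * Note on the feature setup above (general netdev convention, not
	 * bnx2x-specific): dev->hw_features advertises what ethtool may
	 * toggle at runtime, while dev->features is the currently-enabled
	 * set. Seeding features from hw_features enables everything
	 * toggleable by default; flags that must stay fixed here
	 * (NETIF_F_HW_VLAN_RX, NETIF_F_HIGHDMA) are OR-ed into features
	 * only, and NETIF_F_LOOPBACK is added to hw_features afterwards so
	 * it is toggleable but off by default.
	 */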