/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 17,
	.num_mtt	= 1 << 20,
};
static int log_num_mac = 2;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
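/*
 * Note that the log_* parameters above are log2 values: e.g. the default
 * log_num_mac = 2 allows up to 1 << 2 = 4 MACs per ETH port.  The accepted
 * ranges are enforced in mlx4_verify_params() at module load time.
 */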
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}

	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	dev->caps.port_mask = 0;
	for (i = 1; i <= dev->caps.num_ports; ++i)
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
			dev->caps.port_mask |= 1 << (i - 1);
}
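/*
 * In other words, bit (i - 1) of port_mask is set iff port i is currently
 * configured as IB: e.g. a device with port 1 IB and port 2 ETH ends up
 * with port_mask = 0x1.
 */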
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]	    = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]	    = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]	    = dev_cap->wavelength[i];
		dev->caps.trans_code[i]	    = dev_cap->trans_code[i];
	}

	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
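	/* e.g. if the HCA reports max_cq_sz = 4096, at most 4095 CQEs are
	 * usable; the remaining entry is the spare described above. */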
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.mtts_per_seg	     = 1 << log_mtts_per_seg;
	dev->caps.reserved_mtts	     = DIV_ROUND_UP(dev_cap->reserved_mtts,
						    dev->caps.mtts_per_seg);
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev_cap->flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev_cap->flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
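	/* Note: from this point on, dev->caps.mtt_entry_sz is really the MTT
	 * *segment* size in bytes, not the raw per-entry size; see the
	 * matching comment in mlx4_init_icm() below. */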
	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;

	dev->caps.log_num_macs	= log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
		else
			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
		dev->caps.possible_type[i] = dev->caps.port_type[i];
		mlx4_priv(dev)->sense.sense_allowed[i] =
			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	mlx4_set_port_mask(dev);

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
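/*
 * The reserved QP space computed above is carved into contiguous regions:
 * firmware QPs first, then one ETH steering QP per (MAC, VLAN, priority)
 * tuple on each port, a mirror of that region for FC, and finally the
 * fixed MLX4_NUM_FEXCH FC exchange QPs.
 */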
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1]) {
			change = 1;
			dev->caps.port_type[port + 1] = port_types[port];
		}
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			 "Set only 'eth' or 'ib' for both ports "
			 "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
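/*
 * Note that ConnectX firmware runs from host memory donated by the driver:
 * mlx4_load_fw() above allocates the area, hands it to the device with
 * MAP_FA, and only then starts the firmware with RUN_FW.
 */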
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
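/*
 * The cMPT region is thus laid out as four fixed-size windows, one per
 * MLX4_CMPT_TYPE_* value, each starting at
 * cmpt_base + ((u64) (type * cmpt_entry_sz) << MLX4_CMPT_SHIFT).
 */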
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
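	/* Worked example with illustrative numbers: with a 64-byte MTT
	 * segment and a 128-byte cacheline, 17 reserved segments (1088
	 * bytes) round up to 1152 bytes, i.e. 18 segments. */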
	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}
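/*
 * BlueFlame registers live in BAR 2 immediately after the UAR pages, hence
 * the num_uars << PAGE_SHIFT offset above; the remainder of the BAR is
 * mapped write-combining so send WQEs can be written directly to the HCA.
 */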
static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icms(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		if (err == -EACCES)
			mlx4_info(dev, "non-primary physical function, skipping.\n");
		else
			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	mlx4_cfg.log_pg_sz_m = 1;
	mlx4_cfg.log_pg_sz = 0;
	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
	if (err)
		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	unmap_bf_area(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

	return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
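/*
 * Counter indices are handed out from a simple bitmap allocator;
 * max_counters was rounded down to a power of two in mlx4_dev_cap(), so
 * nent - 1 can serve directly as the bitmap's index mask.
 */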
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		enum mlx4_port_type port_type = 0;
		mlx4_SENSE_PORT(dev, port, &port_type);
		if (port_type)
			dev->caps.port_type[port] = port_type;
		ib_port_default_caps = 0;
		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
		if (err)
			mlx4_warn(dev, "failed to get port %d default "
				  "ib capabilities (%d). Continuing with "
				  "caps = 0\n", port, err);
		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

		err = mlx4_check_ext_port_caps(dev, port);
		if (err)
			mlx4_warn(dev, "failed to get port %d extended "
				  "port capabilities support info (%d)."
				  " Assuming not supported\n", port, err);

		err = mlx4_SET_PORT(dev, port);
		if (err) {
			mlx4_err(dev, "Failed to set port %d, aborting\n",
				 port);
			goto err_counters_table_free;
		}
	}
	mlx4_set_port_mask(dev);

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
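/*
 * Note that the error labels above unwind in exactly the reverse order of
 * the corresponding mlx4_init_* calls; each goto target cleans up the last
 * successfully initialized resource and falls through the rest.
 */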
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);
		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/*Working in legacy mode , all EQ's shared*/
			dev->caps.comp_pool	   = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool	   = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
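/*
 * To summarize the MSI-X policy above: if fewer than
 * MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT vectors are granted, the
 * driver falls back to "legacy" mode, where all EQs share nreq - 1
 * completion vectors and comp_pool stays empty; otherwise the first
 * MSIX_LEGACY_SZ - 1 vectors act as shared completion vectors and the
 * remaining nreq - MSIX_LEGACY_SZ form the on-demand pool.
 */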
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	mlx4_init_mac_table(dev, &info->mac_table);
	mlx4_init_vlan_table(dev, &info->vlan_table);
	info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
	info->port_attr.show	  = show_port_type;
	info->port_attr.store	  = set_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
		INIT_LIST_HEAD(&priv->steer[i].high_prios);
	}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->pqp_list,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev	  = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_close;

	priv->msix_ctl.pool_bm = 0;
	spin_lock_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);

	err = mlx4_init_steering(dev);
	if (err)
		goto err_free_eq;

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_xrcd_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	return 0;
}
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);