/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}
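
/*
 * Device attributes are assembled from two sources: a NODE_INFO MAD
 * executed through the MAD_IFC firmware command (the two leading 1
 * arguments tell firmware to skip M_Key/B_Key checking), and the
 * limits the driver computed when it probed the HCA.
 */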

static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver              = mdev->fw_ver;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        props->device_cap_flags    = mdev->device_cap_flags;
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = mdev->limits.page_size_cap;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = mdev->limits.max_wqes;
        props->max_sge             = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = mdev->limits.max_cqes;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr          = mdev->limits.max_srq_wqes;
        props->max_srq_sge         = mdev->limits.max_srq_sge;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
        props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                                        IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys           = mdev->limits.pkey_table_len;
        props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
        props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        /*
         * If Sinai memory key optimization is being used, then only
         * the 8-bit key portion will change.  For other HCAs, the
         * unused index bits will also be used for FMR remapping.
         */
        if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                props->max_map_per_fmr = 255;
        else
                props->max_map_per_fmr =
                        (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

        err = 0;
 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
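
/*
 * Port attributes are decoded from a PORT_INFO MAD; the byte offsets
 * below index into the PortInfo attribute layout defined by the IB
 * spec.
 */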

static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc               = out_mad->data[34] & 0x7;
        props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl             = out_mad->data[36] & 0xf;
        props->state             = out_mad->data[32] & 0xf;
        props->phys_state        = out_mad->data[33] >> 4;
        props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz        = 0x80000000;
        props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width      = out_mad->data[31] & 0xf;
        props->active_speed      = out_mad->data[35] >> 4;
        props->max_mtu           = out_mad->data[41] & 0xf;
        props->active_mtu        = out_mad->data[36] >> 4;
        props->subnet_timeout    = out_mad->data[51] & 0x1f;
        props->max_vl_num        = out_mad->data[37] >> 4;
        props->init_type_reply   = out_mad->data[41] >> 4;

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
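
/*
 * Only node description changes are supported; the update is
 * serialized with cap_mask_mutex, presumably so it cannot race with
 * the port capability updates done in mthca_modify_port() below.
 */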

static int mthca_modify_device(struct ib_device *ibdev,
                               int mask,
                               struct ib_device_modify *props)
{
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                        return -ERESTARTSYS;
                memcpy(ibdev->node_desc, props->node_desc, 64);
                mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        }

        return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;

        if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
        if (err)
                goto out;
out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}
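
/*
 * A PKeyTable MAD carries 32 P_Keys, so index / 32 selects the table
 * block (via attr_mod) and index % 32 picks the entry out of the
 * response.
 */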

static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
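
/*
 * A user context gets its own UAR page for ringing doorbells and, on
 * mem-free HCAs, a user doorbell table; the sizes reported back
 * through udata let libmthca lay out its view of those resources.
 */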

static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext           *context;
        int                              err;

        if (!(to_mdev(ibdev)->active))
                return ERR_PTR(-EAGAIN);

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        context->reg_mr_warned = 0;

        return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}

static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
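
/*
 * For userspace PDs the new PD number is copied back through udata
 * so that libmthca can refer to the PD in later commands.
 */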

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}
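
/*
 * Userspace SRQs supply their buffer lkey and a doorbell record
 * (struct mthca_create_srq) that must be mapped before the SRQ
 * itself is allocated.
 */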

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;

        if (init_attr->srq_type != IB_SRQT_BASIC)
                return ERR_PTR(-ENOSYS);

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (pd->uobject) {
                context = to_mucontext(pd->uobject->context);

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);

                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index     = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq);

        if (err && pd->uobject)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
        struct mthca_ucontext *context;

        if (srq->uobject) {
                context = to_mucontext(srq->uobject->context);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}
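
/*
 * Regular RC/UC/UD QPs may come from userspace, in which case the
 * send and receive doorbell records passed in the mthca_create_qp
 * command are mapped first.  Special QPs (SMI/GSI) are kernel-only
 * and use the fixed QP numbers 0 and 1.
 */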

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                struct mthca_ucontext *context;

                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index  = ucmd.sq_db_index;
                        qp->rq.db_index  = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp);

                if (err && pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (pd->uobject)
                        return ERR_PTR(-EINVAL);

                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_send_wr     = qp->sq.max;
        init_attr->cap.max_recv_wr     = qp->rq.max;
        init_attr->cap.max_send_sge    = qp->sq.max_gs;
        init_attr->cap.max_recv_sge    = qp->rq.max_gs;
        init_attr->cap.max_inline_data = qp->max_inline_data;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
        if (qp->uobject) {
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);
        return 0;
}
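
/*
 * Hardware CQs have power-of-two depth, so the requested entry count
 * is rounded up by the "for (nent = 1; ...)" loop before the CQ is
 * created.
 */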

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     int comp_vector,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                return ERR_PTR(-EINVAL);

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->buf.mr.ibmr.lkey = ucmd.lkey;
                cq->set_ci_db_index  = ucmd.set_db_index;
                cq->arm_db_index     = ucmd.arm_db_index;
        }

        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        cq->resize_buf = NULL;

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int entries)
{
        int ret;

        spin_lock_irq(&cq->lock);
        if (cq->resize_buf) {
                ret = -EBUSY;
                goto unlock;
        }

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf) {
                ret = -ENOMEM;
                goto unlock;
        }

        cq->resize_buf->state = CQ_RESIZE_ALLOC;

        ret = 0;

unlock:
        spin_unlock_irq(&cq->lock);

        if (ret)
                return ret;

        ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (ret) {
                spin_lock_irq(&cq->lock);
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);
                return ret;
        }

        cq->resize_buf->cqe = entries - 1;

        spin_lock_irq(&cq->lock);
        cq->resize_buf->state = CQ_RESIZE_READY;
        spin_unlock_irq(&cq->lock);

        return 0;
}
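
/*
 * Resizing happens in two phases: a resize buffer is allocated and
 * handed to firmware with RESIZE_CQ, and once firmware has switched
 * over, whichever buffer is no longer needed (old on success, new on
 * failure) is freed.  resize_buf->state tracks which phase we're in.
 */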

static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_resize_cq ucmd;
        u32 lkey;
        int ret;

        if (entries < 1 || entries > dev->limits.max_cqes)
                return -EINVAL;

        mutex_lock(&cq->mutex);

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                ret = 0;
                goto out;
        }

        if (cq->is_kernel) {
                ret = mthca_alloc_resize_buf(dev, cq, entries);
                if (ret)
                        goto out;
                lkey = cq->resize_buf->buf.mr.ibmr.lkey;
        } else {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        ret = -EFAULT;
                        goto out;
                }
                lkey = ucmd.lkey;
        }

        ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

        if (ret) {
                if (cq->resize_buf) {
                        mthca_free_cq_buf(dev, &cq->resize_buf->buf,
                                          cq->resize_buf->cqe);
                        kfree(cq->resize_buf);
                        spin_lock_irq(&cq->lock);
                        cq->resize_buf = NULL;
                        spin_unlock_irq(&cq->lock);
                }
                goto out;
        }

        if (cq->is_kernel) {
                struct mthca_cq_buf tbuf;
                int tcqe;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf->state == CQ_RESIZE_READY) {
                        mthca_cq_resize_copy_cqes(cq);
                        tbuf         = cq->buf;
                        tcqe         = cq->ibcq.cqe;
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;
                } else {
                        tbuf = cq->resize_buf->buf;
                        tcqe = cq->resize_buf->cqe;
                }

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);

                mthca_free_cq_buf(dev, &tbuf, tcqe);
        } else
                ibcq->cqe = entries - 1;

out:
        mutex_unlock(&cq->mutex);

        return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}
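
/*
 * Translate IB access flags into MPT flags; local read access is
 * always enabled.
 */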

static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        mr->umem = NULL;

        return &mr->ibmr;
}

static struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int                 num_phys_buf,
                                       int                 acc,
                                       u64                *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        unsigned long mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

        mask = buffer_list[0].addr ^ *iova_start;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0)
                        mask |= buffer_list[i].addr;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;

                total_size += buffer_list[i].size;
        }

        if (mask & ~PAGE_MASK)
                return ERR_PTR(-EINVAL);

        shift = __ffs(mask | 1 << 31);

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);

        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        mr->umem = NULL;

        return &mr->ibmr;
}
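
/*
 * User memory registration pins the pages with ib_umem_get() and
 * then feeds their DMA addresses into the HCA's MTT, batched to the
 * size mthca_write_mtt() can take per firmware command.
 */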

static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                       u64 virt, int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
        struct mthca_reg_mr ucmd;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        int write_mtt_size;

        if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
                if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
                        mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
                                   current->comm);
                        mthca_warn(dev, "  Update libmthca to fix this.\n");
                }
                ++to_mucontext(pd->uobject->context)->reg_mr_warned;

                ucmd.mr_attrs = 0;
        } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return ERR_PTR(-EFAULT);

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
                               ucmd.mr_attrs & MTHCA_MR_DMASYNC);

        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err;
        }

        shift = ffs(mr->umem->page_size) - 1;

        n = 0;
        list_for_each_entry(chunk, &mr->umem->chunk_list, list)
                n += chunk->nents;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_umem;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

        list_for_each_entry(chunk, &mr->umem->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        mr->umem->page_size * k;
                                /*
                                 * Be friendly to write_mtt and pass it chunks
                                 * of appropriate size.
                                 */
                                if (i == write_mtt_size) {
                                        err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
                                        if (err)
                                                goto mtt_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
                             convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_umem:
        ib_umem_release(mr->umem);

err:
        kfree(mr);
        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);

        mthca_free_mr(to_mdev(mr->device), mmr);
        if (mmr->umem)
                ib_umem_release(mmr->umem);
        kfree(mmr);

        return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev);
        return err;
}
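
/*
 * sysfs attributes exported under the IB device, e.g.
 * /sys/class/infiniband/mthca0/{hw_rev,fw_ver,hca_type,board_id}.
 */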

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mthca_dev_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        if (mthca_is_memfree(dev))
                dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
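
/*
 * Wire up the ib_device verbs entry points and register with the IB
 * core.  Mem-free HCAs (Arbel and later) and Tavor use different
 * fast-path routines for posting work requests and arming CQs.
 */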

int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        ret = mthca_init_node_data(dev);
        if (ret)
                return ret;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner                = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver       = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask      =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
        dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
        dev->ib_dev.num_comp_vectors     = 1;
        dev->ib_dev.dma_device           = &dev->pdev->dev;
        dev->ib_dev.query_device         = mthca_query_device;
        dev->ib_dev.query_port           = mthca_query_port;
        dev->ib_dev.modify_device        = mthca_modify_device;
        dev->ib_dev.modify_port          = mthca_modify_port;
        dev->ib_dev.query_pkey           = mthca_query_pkey;
        dev->ib_dev.query_gid            = mthca_query_gid;
        dev->ib_dev.alloc_ucontext       = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext     = mthca_dealloc_ucontext;
        dev->ib_dev.mmap                 = mthca_mmap_uar;
        dev->ib_dev.alloc_pd             = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
        dev->ib_dev.create_ah            = mthca_ah_create;
        dev->ib_dev.query_ah             = mthca_ah_query;
        dev->ib_dev.destroy_ah           = mthca_ah_destroy;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq           = mthca_create_srq;
                dev->ib_dev.modify_srq           = mthca_modify_srq;
                dev->ib_dev.query_srq            = mthca_query_srq;
                dev->ib_dev.destroy_srq          = mthca_destroy_srq;
                dev->ib_dev.uverbs_cmd_mask     |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)  |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)  |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)   |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

                if (mthca_is_memfree(dev))
                        dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
                else
                        dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
        }

        dev->ib_dev.create_qp            = mthca_create_qp;
        dev->ib_dev.modify_qp            = mthca_modify_qp;
        dev->ib_dev.query_qp             = mthca_query_qp;
        dev->ib_dev.destroy_qp           = mthca_destroy_qp;
        dev->ib_dev.create_cq            = mthca_create_cq;
        dev->ib_dev.resize_cq            = mthca_resize_cq;
        dev->ib_dev.destroy_cq           = mthca_destroy_cq;
        dev->ib_dev.poll_cq              = mthca_poll_cq;
        dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr             = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr            = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr          = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast         = mthca_multicast_attach;
        dev->ib_dev.detach_mcast         = mthca_multicast_detach;
        dev->ib_dev.process_mad          = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send     = mthca_arbel_post_send;
                dev->ib_dev.post_recv     = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send     = mthca_tavor_post_send;
                dev->ib_dev.post_recv     = mthca_tavor_post_receive;
        }

        mutex_init(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev, NULL);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
                ret = device_create_file(&dev->ib_dev.dev,
                                         mthca_dev_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

        mthca_start_catas_poll(dev);

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        mthca_stop_catas_poll(dev);
        ib_unregister_device(&dev->ib_dev);
}