~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to drivers/infiniband/ulp/srp/ib_srp.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

@@ old 59, new 59 @@
                    "v" DRV_VERSION " (" DRV_RELDATE ")");
 MODULE_LICENSE("Dual BSD/GPL");
 
-static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
-static int srp_max_iu_len;
-
-module_param(srp_sg_tablesize, int, 0444);
-MODULE_PARM_DESC(srp_sg_tablesize,
-                 "Max number of gather/scatter entries per I/O (default is 12, max 255)");
-
+static unsigned int srp_sg_tablesize;
+static unsigned int cmd_sg_entries;
+static unsigned int indirect_sg_entries;
+static bool allow_ext_sg;
 static int topspin_workarounds = 1;
 
+module_param(srp_sg_tablesize, uint, 0444);
+MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
+
+module_param(cmd_sg_entries, uint, 0444);
+MODULE_PARM_DESC(cmd_sg_entries,
+                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
+
+module_param(indirect_sg_entries, uint, 0444);
+MODULE_PARM_DESC(indirect_sg_entries,
+                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
+
+module_param(allow_ext_sg, bool, 0444);
+MODULE_PARM_DESC(allow_ext_sg,
+                  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
+
 module_param(topspin_workarounds, int, 0444);
 MODULE_PARM_DESC(topspin_workarounds,
                  "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
 
-static int mellanox_workarounds = 1;
-
-module_param(mellanox_workarounds, int, 0444);
-MODULE_PARM_DESC(mellanox_workarounds,
-                 "Enable workarounds for Mellanox SRP target bugs if != 0");
-
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device);
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
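The old srp_sg_tablesize knob is split here: cmd_sg_entries bounds the gather/scatter descriptors carried inside the SRP_CMD IU itself, indirect_sg_entries bounds the indirect descriptor table, and allow_ext_sg decides whether a request that still needs more than cmd_sg_entries descriptors after mapping may spill into external indirect descriptors instead of failing; the old parameter name is kept only as a deprecated alias. A condensed sketch (not a verbatim excerpt; field names as in the code added later in this diff) of how the new parameters feed the per-target limits:

        target->cmd_sg_cnt   = cmd_sg_entries;
        target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;

        /* the indirect table lives in its own DMA-mapped buffer ... */
        target->indirect_size = target->sg_tablesize *
                                sizeof (struct srp_direct_buf);

        /* ... so the IU only has to hold cmd_sg_cnt inline descriptors */
        target->max_iu_len = sizeof (struct srp_cmd) +
                             sizeof (struct srp_indirect_buf) +
                             target->cmd_sg_cnt * sizeof (struct srp_direct_buf);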
@@ old 114, new 120 @@
                  !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
 }
 
-static int srp_target_is_mellanox(struct srp_target_port *target)
-{
-        static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };
-
-        return mellanox_workarounds &&
-                !memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
-}
-
 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                    gfp_t gfp_mask,
                                    enum dma_data_direction direction)
@@ old 378, new 376 @@
 
         req->priv.opcode        = SRP_LOGIN_REQ;
         req->priv.tag           = 0;
-        req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
+        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
         req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                               SRP_BUF_FORMAT_INDIRECT);
         /*
@@ old 456, new 454 @@
         return changed;
 }
 
+static void srp_free_req_data(struct srp_target_port *target)
+{
+        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
+        struct srp_request *req;
+        int i;
+
+        for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
+                kfree(req->fmr_list);
+                kfree(req->map_page);
+                if (req->indirect_dma_addr) {
+                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
+                                            target->indirect_size,
+                                            DMA_TO_DEVICE);
+                }
+                kfree(req->indirect_desc);
+        }
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
         struct srp_target_port *target =
@@ old 472, new 488 @@
         scsi_remove_host(target->scsi_host);
         ib_destroy_cm_id(target->cm_id);
         srp_free_target_ib(target);
+        srp_free_req_data(target);
         scsi_host_put(target->scsi_host);
 }
 
@@ old 535, new 552 @@
                            struct srp_target_port *target,
                            struct srp_request *req)
 {
+        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
+        struct ib_pool_fmr **pfmr;
+
         if (!scsi_sglist(scmnd) ||
             (scmnd->sc_data_direction != DMA_TO_DEVICE &&
              scmnd->sc_data_direction != DMA_FROM_DEVICE))
                 return;
 
-        if (req->fmr) {
-                ib_fmr_pool_unmap(req->fmr);
-                req->fmr = NULL;
-        }
+        pfmr = req->fmr_list;
+        while (req->nfmr--)
+                ib_fmr_pool_unmap(*pfmr++);
 
-        ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
-                        scsi_sg_count(scmnd), scmnd->sc_data_direction);
+        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
+                        scmnd->sc_data_direction);
 }
 
 static void srp_remove_req(struct srp_target_port *target,
@@ old 645, new 664 @@
         return ret;
 }
 
-static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
-                       int sg_cnt, struct srp_request *req,
-                       struct srp_direct_buf *buf)
-{
+static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
+                         unsigned int dma_len, u32 rkey)
+{
+        struct srp_direct_buf *desc = state->desc;
+
+        desc->va = cpu_to_be64(dma_addr);
+        desc->key = cpu_to_be32(rkey);
+        desc->len = cpu_to_be32(dma_len);
+
+        state->total_len += dma_len;
+        state->desc++;
+        state->ndesc++;
+}
+
+static int srp_map_finish_fmr(struct srp_map_state *state,
+                              struct srp_target_port *target)
+{
+        struct srp_device *dev = target->srp_host->srp_dev;
+        struct ib_pool_fmr *fmr;
         u64 io_addr = 0;
-        u64 *dma_pages;
-        u32 len;
-        int page_cnt;
-        int i, j;
-        int ret;
+
+        if (!state->npages)
+                return 0;
+
+        if (state->npages == 1) {
+                srp_map_desc(state, state->base_dma_addr, state->fmr_len,
+                             target->rkey);
+                state->npages = state->fmr_len = 0;
+                return 0;
+        }
+
+        fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
+                                   state->npages, io_addr);
+        if (IS_ERR(fmr))
+                return PTR_ERR(fmr);
+
+        *state->next_fmr++ = fmr;
+        state->nfmr++;
+
+        srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
+        state->npages = state->fmr_len = 0;
+        return 0;
+}
+
+static void srp_map_update_start(struct srp_map_state *state,
+                                 struct scatterlist *sg, int sg_index,
+                                 dma_addr_t dma_addr)
+{
+        state->unmapped_sg = sg;
+        state->unmapped_index = sg_index;
+        state->unmapped_addr = dma_addr;
+}
+
+static int srp_map_sg_entry(struct srp_map_state *state,
+                            struct srp_target_port *target,
+                            struct scatterlist *sg, int sg_index,
+                            int use_fmr)
+{
         struct srp_device *dev = target->srp_host->srp_dev;
         struct ib_device *ibdev = dev->dev;
-        struct scatterlist *sg;
-
-        if (!dev->fmr_pool)
-                return -ENODEV;
-
-        if (srp_target_is_mellanox(target) &&
-            (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
-                return -EINVAL;
-
-        len = page_cnt = 0;
-        scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
-                unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
-                if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
-                        if (i > 0)
-                                return -EINVAL;
-                        else
-                                ++page_cnt;
-                }
-                if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
-                    ~dev->fmr_page_mask) {
-                        if (i < sg_cnt - 1)
-                                return -EINVAL;
-                        else
-                                ++page_cnt;
-                }
-
-                len += dma_len;
-        }
-
-        page_cnt += len >> dev->fmr_page_shift;
-        if (page_cnt > SRP_FMR_SIZE)
-                return -ENOMEM;
-
-        dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
-        if (!dma_pages)
-                return -ENOMEM;
-
-        page_cnt = 0;
-        scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
-                unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
-                for (j = 0; j < dma_len; j += dev->fmr_page_size)
-                        dma_pages[page_cnt++] =
-                                (ib_sg_dma_address(ibdev, sg) &
-                                 dev->fmr_page_mask) + j;
-        }
-
-        req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
-                                        dma_pages, page_cnt, io_addr);
-        if (IS_ERR(req->fmr)) {
-                ret = PTR_ERR(req->fmr);
-                req->fmr = NULL;
-                goto out;
-        }
-
-        buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
-                               ~dev->fmr_page_mask);
-        buf->key = cpu_to_be32(req->fmr->fmr->rkey);
-        buf->len = cpu_to_be32(len);
-
+        dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
+        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+        unsigned int len;
+        int ret;
+
+        if (!dma_len)
+                return 0;
+
+        if (use_fmr == SRP_MAP_NO_FMR) {
+                /* Once we're in direct map mode for a request, we don't
+                 * go back to FMR mode, so no need to update anything
+                 * other than the descriptor.
+                 */
+                srp_map_desc(state, dma_addr, dma_len, target->rkey);
+                return 0;
+        }
+
+        /* If we start at an offset into the FMR page, don't merge into
+         * the current FMR. Finish it out, and use the kernel's MR for this
+         * sg entry. This is to avoid potential bugs on some SRP targets
+         * that were never quite defined, but went away when the initiator
+         * avoided using FMR on such page fragments.
+         */
+        if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
+                ret = srp_map_finish_fmr(state, target);
+                if (ret)
+                        return ret;
+
+                srp_map_desc(state, dma_addr, dma_len, target->rkey);
+                srp_map_update_start(state, NULL, 0, 0);
+                return 0;
+        }
+
+        /* If this is the first sg to go into the FMR, save our position.
+         * We need to know the first unmapped entry, its index, and the
+         * first unmapped address within that entry to be able to restart
+         * mapping after an error.
+         */
+        if (!state->unmapped_sg)
+                srp_map_update_start(state, sg, sg_index, dma_addr);
+
+        while (dma_len) {
+                if (state->npages == SRP_FMR_SIZE) {
+                        ret = srp_map_finish_fmr(state, target);
+                        if (ret)
+                                return ret;
+
+                        srp_map_update_start(state, sg, sg_index, dma_addr);
+                }
+
+                len = min_t(unsigned int, dma_len, dev->fmr_page_size);
+
+                if (!state->npages)
+                        state->base_dma_addr = dma_addr;
+                state->pages[state->npages++] = dma_addr;
+                state->fmr_len += len;
+                dma_addr += len;
+                dma_len -= len;
+        }
+
+        /* If the last entry of the FMR wasn't a full page, then we need to
+         * close it out and start a new one -- we can only merge at page
+         * boundries.
+         */
         ret = 0;
-
-out:
-        kfree(dma_pages);
-
+        if (len != dev->fmr_page_size) {
+                ret = srp_map_finish_fmr(state, target);
+                if (!ret)
+                        srp_map_update_start(state, NULL, 0, 0);
+        }
         return ret;
 }
 
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                         struct srp_request *req)
 {
-        struct scatterlist *scat;
+        struct scatterlist *scat, *sg;
         struct srp_cmd *cmd = req->cmd->buf;
-        int len, nents, count;
-        u8 fmt = SRP_DATA_DESC_DIRECT;
+        int i, len, nents, count, use_fmr;
         struct srp_device *dev;
         struct ib_device *ibdev;
+        struct srp_map_state state;
+        struct srp_indirect_buf *indirect_hdr;
+        u32 table_len;
+        u8 fmt;
 
         if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
                 return sizeof (struct srp_cmd);
@@ old 754, new 828 @@
         ibdev = dev->dev;
 
         count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
+        if (unlikely(count == 0))
+                return -EIO;
 
         fmt = SRP_DATA_DESC_DIRECT;
         len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ old 770, new 846 @@
                 buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
                 buf->key = cpu_to_be32(target->rkey);
                 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
-        } else if (srp_map_fmr(target, scat, count, req,
-                               (void *) cmd->add_data)) {
-                /*
-                 * FMR mapping failed, and the scatterlist has more
-                 * than one entry.  Generate an indirect memory
-                 * descriptor.
+
+                req->nfmr = 0;
+                goto map_complete;
+        }
+
+        /* We have more than one scatter/gather entry, so build our indirect
+         * descriptor table, trying to merge as many entries with FMR as we
+         * can.
+         */
+        indirect_hdr = (void *) cmd->add_data;
+
+        ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
+                                   target->indirect_size, DMA_TO_DEVICE);
+
+        memset(&state, 0, sizeof(state));
+        state.desc      = req->indirect_desc;
+        state.pages     = req->map_page;
+        state.next_fmr  = req->fmr_list;
+
+        use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
+
+        for_each_sg(scat, sg, count, i) {
+                if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
+                        /* FMR mapping failed, so backtrack to the first
+                         * unmapped entry and continue on without using FMR.
+                         */
+                        dma_addr_t dma_addr;
+                        unsigned int dma_len;
+
+backtrack:
+                        sg = state.unmapped_sg;
+                        i = state.unmapped_index;
+
+                        dma_addr = ib_sg_dma_address(ibdev, sg);
+                        dma_len = ib_sg_dma_len(ibdev, sg);
+                        dma_len -= (state.unmapped_addr - dma_addr);
+                        dma_addr = state.unmapped_addr;
+                        use_fmr = SRP_MAP_NO_FMR;
+                        srp_map_desc(&state, dma_addr, dma_len, target->rkey);
+                }
+        }
+
+        if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
+                goto backtrack;
+
+        /* We've mapped the request, now pull as much of the indirect
+         * descriptor table as we can into the command buffer. If this
+         * target is not using an external indirect table, we are
+         * guaranteed to fit into the command, as the SCSI layer won't
+         * give us more S/G entries than we allow.
+         */
+        req->nfmr = state.nfmr;
+        if (state.ndesc == 1) {
+                /* FMR mapping was able to collapse this to one entry,
+                 * so use a direct descriptor.
                  */
-                struct srp_indirect_buf *buf = (void *) cmd->add_data;
-                struct scatterlist *sg;
-                u32 datalen = 0;
-                int i;
-
-                fmt = SRP_DATA_DESC_INDIRECT;
-                len = sizeof (struct srp_cmd) +
-                        sizeof (struct srp_indirect_buf) +
-                        count * sizeof (struct srp_direct_buf);
-
-                scsi_for_each_sg(scmnd, sg, count, i) {
-                        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
-                        buf->desc_list[i].va  =
-                                cpu_to_be64(ib_sg_dma_address(ibdev, sg));
-                        buf->desc_list[i].key =
-                                cpu_to_be32(target->rkey);
-                        buf->desc_list[i].len = cpu_to_be32(dma_len);
-                        datalen += dma_len;
-                }
-
-                if (scmnd->sc_data_direction == DMA_TO_DEVICE)
-                        cmd->data_out_desc_cnt = count;
-                else
-                        cmd->data_in_desc_cnt = count;
-
-                buf->table_desc.va  =
-                        cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
-                buf->table_desc.key =
-                        cpu_to_be32(target->rkey);
-                buf->table_desc.len =
-                        cpu_to_be32(count * sizeof (struct srp_direct_buf));
-
-                buf->len = cpu_to_be32(datalen);
-        }
-
+                struct srp_direct_buf *buf = (void *) cmd->add_data;
+
+                *buf = req->indirect_desc[0];
+                goto map_complete;
+        }
+
+        if (unlikely(target->cmd_sg_cnt < state.ndesc &&
+                                                !target->allow_ext_sg)) {
+                shost_printk(KERN_ERR, target->scsi_host,
+                             "Could not fit S/G list into SRP_CMD\n");
+                return -EIO;
+        }
+
+        count = min(state.ndesc, target->cmd_sg_cnt);
+        table_len = state.ndesc * sizeof (struct srp_direct_buf);
+
+        fmt = SRP_DATA_DESC_INDIRECT;
+        len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
+        len += count * sizeof (struct srp_direct_buf);
+
+        memcpy(indirect_hdr->desc_list, req->indirect_desc,
+               count * sizeof (struct srp_direct_buf));
+
+        indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
+        indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
+        indirect_hdr->table_desc.len = cpu_to_be32(table_len);
+        indirect_hdr->len = cpu_to_be32(state.total_len);
+
+        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+                cmd->data_out_desc_cnt = count;
+        else
+                cmd->data_in_desc_cnt = count;
+
+        ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
+                                      DMA_TO_DEVICE);
+
+map_complete:
         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                 cmd->buf_fmt = fmt << 4;
         else
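In the rewritten path above, srp_map_data() walks the DMA-mapped scatterlist with srp_map_sg_entry(), which splits each entry into fmr_page_size pieces and closes out an FMR whenever SRP_FMR_SIZE pages have accumulated or an entry is not page-aligned; if an FMR mapping ever fails it backtracks to the position saved by srp_map_update_start() and finishes the request with plain descriptors. A stand-alone user-space toy (illustrative only, not kernel code; the numbers are made up) that mirrors just the page-splitting arithmetic of the while (dma_len) loop:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t dma_addr = 0x100000;        /* page-aligned start of one S/G entry */
                unsigned int dma_len = 10000;        /* bytes in that entry */
                const unsigned int fmr_page_size = 4096;
                unsigned int npages = 0;

                while (dma_len) {
                        unsigned int len = dma_len < fmr_page_size ? dma_len
                                                                   : fmr_page_size;

                        npages++;        /* kernel: state->pages[state->npages++] = dma_addr */
                        dma_addr += len;
                        dma_len  -= len;
                }

                /* 10000 bytes -> 4096 + 4096 + 1808 -> 3 FMR pages */
                printf("entry covers %u FMR pages\n", npages);
                return 0;
        }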
@@ old 1021, new 1147 @@
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
         struct ib_device *dev = target->srp_host->srp_dev->dev;
-        struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
+        struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
         int res;
         u8 opcode;
 
@@ old 1105, new 1231 @@
                         break;
                 }
 
-                iu = (struct srp_iu *) wc.wr_id;
+                iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
                 list_add(&iu->list, &target->free_tx);
         }
 }
@@ old 1140, new 1266 @@
         spin_unlock_irqrestore(&target->lock, flags);
 
         dev = target->srp_host->srp_dev->dev;
-        ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
+        ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
                                    DMA_TO_DEVICE);
 
         scmnd->result        = 0;
@@ old 1164, new 1290 @@
                 goto err_iu;
         }
 
-        ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+        ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
                                       DMA_TO_DEVICE);
 
         if (srp_post_send(target, iu, len)) {
@@ old 1204, new 1330 @@
 
         for (i = 0; i < SRP_SQ_SIZE; ++i) {
                 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
-                                                  srp_max_iu_len,
+                                                  target->max_iu_len,
                                                   GFP_KERNEL, DMA_TO_DEVICE);
                 if (!target->tx_ring[i])
                         goto err;
@@ old 1228, new 1354 @@
         return -ENOMEM;
 }
 
+static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
+                               struct srp_login_rsp *lrsp,
+                               struct srp_target_port *target)
+{
+        struct ib_qp_attr *qp_attr = NULL;
+        int attr_mask = 0;
+        int ret;
+        int i;
+
+        if (lrsp->opcode == SRP_LOGIN_RSP) {
+                target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
+                target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
+
+                /*
+                 * Reserve credits for task management so we don't
+                 * bounce requests back to the SCSI mid-layer.
+                 */
+                target->scsi_host->can_queue
+                        = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+                              target->scsi_host->can_queue);
+        } else {
+                shost_printk(KERN_WARNING, target->scsi_host,
+                             PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
+                ret = -ECONNRESET;
+                goto error;
+        }
+
+        if (!target->rx_ring[0]) {
+                ret = srp_alloc_iu_bufs(target);
+                if (ret)
+                        goto error;
+        }
+
+        ret = -ENOMEM;
+        qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+        if (!qp_attr)
+                goto error;
+
+        qp_attr->qp_state = IB_QPS_RTR;
+        ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+        if (ret)
+                goto error_free;
+
+        ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
+        if (ret)
+                goto error_free;
+
+        for (i = 0; i < SRP_RQ_SIZE; i++) {
+                struct srp_iu *iu = target->rx_ring[i];
+                ret = srp_post_recv(target, iu);
+                if (ret)
+                        goto error_free;
+        }
+
+        qp_attr->qp_state = IB_QPS_RTS;
+        ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+        if (ret)
+                goto error_free;
+
+        ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
+        if (ret)
+                goto error_free;
+
+        ret = ib_send_cm_rtu(cm_id, NULL, 0);
+
+error_free:
+        kfree(qp_attr);
+
+error:
+        target->status = ret;
+}
+
 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
                                struct ib_cm_event *event,
                                struct srp_target_port *target)
@@ old 1311, new 1509 @@
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 {
         struct srp_target_port *target = cm_id->context;
-        struct ib_qp_attr *qp_attr = NULL;
-        int attr_mask = 0;
         int comp = 0;
-        int opcode = 0;
-        int i;
 
         switch (event->event) {
         case IB_CM_REQ_ERROR:
@@ old 1327, new 1521 @@
 
         case IB_CM_REP_RECEIVED:
                 comp = 1;
-                opcode = *(u8 *) event->private_data;
-
-                if (opcode == SRP_LOGIN_RSP) {
-                        struct srp_login_rsp *rsp = event->private_data;
-
-                        target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
-                        target->req_lim       = be32_to_cpu(rsp->req_lim_delta);
-
-                        /*
-                         * Reserve credits for task management so we don't
-                         * bounce requests back to the SCSI mid-layer.
-                         */
-                        target->scsi_host->can_queue
-                                = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
-                                      target->scsi_host->can_queue);
-                } else {
-                        shost_printk(KERN_WARNING, target->scsi_host,
-                                    PFX "Unhandled RSP opcode %#x\n", opcode);
-                        target->status = -ECONNRESET;
-                        break;
-                }
-
-                if (!target->rx_ring[0]) {
-                        target->status = srp_alloc_iu_bufs(target);
-                        if (target->status)
-                                break;
-                }
-
-                qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
-                if (!qp_attr) {
-                        target->status = -ENOMEM;
-                        break;
-                }
-
-                qp_attr->qp_state = IB_QPS_RTR;
-                target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
-                if (target->status)
-                        break;
-
-                target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
-                if (target->status)
-                        break;
-
-                for (i = 0; i < SRP_RQ_SIZE; i++) {
-                        struct srp_iu *iu = target->rx_ring[i];
-                        target->status = srp_post_recv(target, iu);
-                        if (target->status)
-                                break;
-                }
-                if (target->status)
-                        break;
-
-                qp_attr->qp_state = IB_QPS_RTS;
-                target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
-                if (target->status)
-                        break;
-
-                target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
-                if (target->status)
-                        break;
-
-                target->status = ib_send_cm_rtu(cm_id, NULL, 0);
-                if (target->status)
-                        break;
-
+                srp_cm_rep_handler(cm_id, event->private_data, target);
                 break;
 
         case IB_CM_REJ_RECEIVED:
@@ old 1431, new 1561 @@
         if (comp)
                 complete(&target->done);
 
-        kfree(qp_attr);
-
         return 0;
 }
 
@@ old 1658, new 1786 @@
         return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
 }
 
+static ssize_t show_cmd_sg_entries(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+        struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+        return sprintf(buf, "%u\n", target->cmd_sg_cnt);
+}
+
+static ssize_t show_allow_ext_sg(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+        struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+        return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
+}
+
 static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
 static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
 static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
@@ old 1668, new 1812 @@
 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
+static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
+static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
 
 static struct device_attribute *srp_host_attrs[] = {
         &dev_attr_id_ext,
@@ old 1680, new 1826 @@
         &dev_attr_zero_req_lim,
         &dev_attr_local_ib_port,
         &dev_attr_local_ib_device,
+        &dev_attr_cmd_sg_entries,
+        &dev_attr_allow_ext_sg,
         NULL
 };
 
@@ old 1692, new 1840 @@
         .eh_abort_handler               = srp_abort,
         .eh_device_reset_handler        = srp_reset_device,
         .eh_host_reset_handler          = srp_reset_host,
+        .sg_tablesize                   = SRP_DEF_SG_TABLESIZE,
         .can_queue                      = SRP_CMD_SQ_SIZE,
         .this_id                        = -1,
         .cmd_per_lun                    = SRP_CMD_SQ_SIZE,
@@ old 1763, new 1912 @@
         SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
         SRP_OPT_IO_CLASS        = 1 << 7,
         SRP_OPT_INITIATOR_EXT   = 1 << 8,
+        SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
+        SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
+        SRP_OPT_SG_TABLESIZE    = 1 << 11,
         SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
                                    SRP_OPT_IOC_GUID     |
                                    SRP_OPT_DGID         |
@@ old 1780, new 1932 @@
         { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
         { SRP_OPT_IO_CLASS,             "io_class=%x"           },
         { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
+        { SRP_OPT_CMD_SG_ENTRIES,       "cmd_sg_entries=%u"     },
+        { SRP_OPT_ALLOW_EXT_SG,         "allow_ext_sg=%u"       },
+        { SRP_OPT_SG_TABLESIZE,         "sg_tablesize=%u"       },
         { SRP_OPT_ERR,                  NULL                    }
 };
 
@@ old 1907, new 2062 @@
                         kfree(p);
                         break;
 
+                case SRP_OPT_CMD_SG_ENTRIES:
+                        if (match_int(args, &token) || token < 1 || token > 255) {
+                                printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p);
+                                goto out;
+                        }
+                        target->cmd_sg_cnt = token;
+                        break;
+
+                case SRP_OPT_ALLOW_EXT_SG:
+                        if (match_int(args, &token)) {
+                                printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p);
+                                goto out;
+                        }
+                        target->allow_ext_sg = !!token;
+                        break;
+
+                case SRP_OPT_SG_TABLESIZE:
+                        if (match_int(args, &token) || token < 1 ||
+                                        token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
+                                printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p);
+                                goto out;
+                        }
+                        target->sg_tablesize = token;
+                        break;
+
                 default:
                         printk(KERN_WARNING PFX "unknown parameter or missing value "
                                "'%s' in target creation request\n", p);
@@ old 1937, new 2117 @@
                 container_of(dev, struct srp_host, dev);
         struct Scsi_Host *target_host;
         struct srp_target_port *target;
-        int ret;
-        int i;
+        struct ib_device *ibdev = host->srp_dev->dev;
+        dma_addr_t dma_addr;
+        int i, ret;
 
         target_host = scsi_host_alloc(&srp_template,
                                       sizeof (struct srp_target_port));
         if (!target_host)
                 return -ENOMEM;
 
-        target_host->transportt = ib_srp_transport_template;
+        target_host->transportt  = ib_srp_transport_template;
         target_host->max_lun     = SRP_MAX_LUN;
         target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
 
         target = host_to_target(target_host);
 
-        target->io_class   = SRP_REV16A_IB_IO_CLASS;
-        target->scsi_host  = target_host;
-        target->srp_host   = host;
-        target->lkey       = host->srp_dev->mr->lkey;
-        target->rkey       = host->srp_dev->mr->rkey;
+        target->io_class        = SRP_REV16A_IB_IO_CLASS;
+        target->scsi_host       = target_host;
+        target->srp_host        = host;
+        target->lkey            = host->srp_dev->mr->lkey;
+        target->rkey            = host->srp_dev->mr->rkey;
+        target->cmd_sg_cnt      = cmd_sg_entries;
+        target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
+        target->allow_ext_sg    = allow_ext_sg;
+
+        ret = srp_parse_options(buf, target);
+        if (ret)
+                goto err;
+
+        if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
+                                target->cmd_sg_cnt < target->sg_tablesize) {
+                printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
+                target->sg_tablesize = target->cmd_sg_cnt;
+        }
+
+        target_host->sg_tablesize = target->sg_tablesize;
+        target->indirect_size = target->sg_tablesize *
+                                sizeof (struct srp_direct_buf);
+        target->max_iu_len = sizeof (struct srp_cmd) +
+                             sizeof (struct srp_indirect_buf) +
+                             target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
 
         spin_lock_init(&target->lock);
         INIT_LIST_HEAD(&target->free_tx);
         INIT_LIST_HEAD(&target->free_reqs);
         for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
-                target->req_ring[i].index = i;
-                list_add_tail(&target->req_ring[i].list, &target->free_reqs);
+                struct srp_request *req = &target->req_ring[i];
+
+                req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
+                                        GFP_KERNEL);
+                req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
+                                        GFP_KERNEL);
+                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+                if (!req->fmr_list || !req->map_page || !req->indirect_desc)
+                        goto err_free_mem;
+
+                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+                                             target->indirect_size,
+                                             DMA_TO_DEVICE);
+                if (ib_dma_mapping_error(ibdev, dma_addr))
+                        goto err_free_mem;
+
+                req->indirect_dma_addr = dma_addr;
+                req->index = i;
+                list_add_tail(&req->list, &target->free_reqs);
         }
 
-        ret = srp_parse_options(buf, target);
-        if (ret)
-                goto err;
-
-        ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);
+        ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
 
         shost_printk(KERN_DEBUG, target->scsi_host, PFX
                      "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
@@ old 1982, new 2196 @@
 
         ret = srp_create_target_ib(target);
         if (ret)
-                goto err;
+                goto err_free_mem;
 
         ret = srp_new_cm_id(target);
         if (ret)
-                goto err_free;
+                goto err_free_ib;
 
         target->qp_in_error = 0;
         ret = srp_connect_target(target);
@@ old 2008, new 2222 @@
 err_cm_id:
         ib_destroy_cm_id(target->cm_id);
 
-err_free:
+err_free_ib:
         srp_free_target_ib(target);
 
+err_free_mem:
+        srp_free_req_data(target);
+
 err:
         scsi_host_put(target_host);
 
@@ old 2083, new 2300 @@
         struct ib_device_attr *dev_attr;
         struct ib_fmr_pool_param fmr_param;
         struct srp_host *host;
-        int s, e, p;
+        int max_pages_per_fmr, fmr_page_shift, s, e, p;
 
         dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
         if (!dev_attr)
@@ old 2101, new 2318 @@
 
         /*
          * Use the smallest page size supported by the HCA, down to a
-         * minimum of 512 bytes (which is the smallest sector that a
-         * SCSI command will ever carry).
+         * minimum of 4096 bytes. We're unlikely to build large sglists
+         * out of smaller entries.
          */
-        srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
-        srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
-        srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
+        fmr_page_shift          = max(12, ffs(dev_attr->page_size_cap) - 1);
+        srp_dev->fmr_page_size  = 1 << fmr_page_shift;
+        srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
+        srp_dev->fmr_max_size   = srp_dev->fmr_page_size * SRP_FMR_SIZE;
 
         INIT_LIST_HEAD(&srp_dev->dev_list);
 
@@ old 2122, new 2340 @@
         if (IS_ERR(srp_dev->mr))
                 goto err_pd;
 
-        memset(&fmr_param, 0, sizeof fmr_param);
-        fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
-        fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
-        fmr_param.cache             = 1;
-        fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
-        fmr_param.page_shift        = srp_dev->fmr_page_shift;
-        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
-                                       IB_ACCESS_REMOTE_WRITE |
-                                       IB_ACCESS_REMOTE_READ);
-
-        srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
+        for (max_pages_per_fmr = SRP_FMR_SIZE;
+                        max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
+                        max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
+                memset(&fmr_param, 0, sizeof fmr_param);
+                fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
+                fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
+                fmr_param.cache             = 1;
+                fmr_param.max_pages_per_fmr = max_pages_per_fmr;
+                fmr_param.page_shift        = fmr_page_shift;
+                fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
+                                               IB_ACCESS_REMOTE_WRITE |
+                                               IB_ACCESS_REMOTE_READ);
+
+                srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
+                if (!IS_ERR(srp_dev->fmr_pool))
+                        break;
+        }
+
         if (IS_ERR(srp_dev->fmr_pool))
                 srp_dev->fmr_pool = NULL;
 
@@ old 2207, new 2432 @@
                         srp_disconnect_target(target);
                         ib_destroy_cm_id(target->cm_id);
                         srp_free_target_ib(target);
+                        srp_free_req_data(target);
                         scsi_host_put(target->scsi_host);
                 }
 
@@ old 2230, new 2456 @@
 
         BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
-        if (srp_sg_tablesize > 255) {
-                printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
-                srp_sg_tablesize = 255;
+        if (srp_sg_tablesize) {
+                printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
+                if (!cmd_sg_entries)
+                        cmd_sg_entries = srp_sg_tablesize;
+        }
+
+        if (!cmd_sg_entries)
+                cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
+
+        if (cmd_sg_entries > 255) {
+                printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
+                cmd_sg_entries = 255;
+        }
+
+        if (!indirect_sg_entries)
+                indirect_sg_entries = cmd_sg_entries;
+        else if (indirect_sg_entries < cmd_sg_entries) {
+                printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries);
+                indirect_sg_entries = cmd_sg_entries;
         }
 
         ib_srp_transport_template =
@@ old 2240, new 2482 @@
         if (!ib_srp_transport_template)
                 return -ENOMEM;
 
-        srp_template.sg_tablesize = srp_sg_tablesize;
-        srp_max_iu_len = (sizeof (struct srp_cmd) +
-                          sizeof (struct srp_indirect_buf) +
-                          srp_sg_tablesize * 16);
-
         ret = class_register(&srp_class);
         if (ret) {
                 printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");