/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>

#include "libcxgbi.h"

static unsigned int dbg_level;

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.0"
#define DRV_MODULE_RELDATE	"Jun. 2010"

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
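
/*
 * Illustrative note (not part of the original source): dbg_level is a
 * bitmask tested by the log_debug() calls below, e.g. 1 << CXGBI_DBG_SOCK
 * for connection tracing.  The CXGBI_DBG_* bit positions are defined in
 * libcxgbi.h; a mask can be given at load time (modprobe libcxgbi
 * dbg_level=<mask>) or changed later through
 * /sys/module/libcxgbi/parameters/dbg_level.
 */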

/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);

static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}

struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);

void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);
	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
159
void cxgbi_device_unregister_all(unsigned int flag)
161
struct cxgbi_device *cdev, *tmp;
163
mutex_lock(&cdev_mutex);
164
list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
165
if ((cdev->flags & flag) == flag) {
166
log_debug(1 << CXGBI_DBG_DEV,
167
"cdev 0x%p, p# %u,%s.\n",
168
cdev, cdev->nports, cdev->nports ?
169
cdev->ports[0]->name : "");
170
list_del(&cdev->list_head);
171
cxgbi_device_destroy(cdev);
174
mutex_unlock(&cdev_mutex);
176
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
178
struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
180
struct cxgbi_device *cdev, *tmp;
182
mutex_lock(&cdev_mutex);
183
list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
184
if (cdev->lldev == lldev) {
185
mutex_unlock(&cdev_mutex);
189
mutex_unlock(&cdev_mutex);
190
log_debug(1 << CXGBI_DBG_DEV,
191
"lldev 0x%p, NO match found.\n", lldev);
194
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
196
static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
199
struct net_device *vdev = NULL;
200
struct cxgbi_device *cdev, *tmp;
203
if (ndev->priv_flags & IFF_802_1Q_VLAN) {
205
ndev = vlan_dev_real_dev(ndev);
206
log_debug(1 << CXGBI_DBG_DEV,
207
"vlan dev %s -> %s.\n", vdev->name, ndev->name);
210
mutex_lock(&cdev_mutex);
211
list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
212
for (i = 0; i < cdev->nports; i++) {
213
if (ndev == cdev->ports[i]) {
214
cdev->hbas[i]->vdev = vdev;
215
mutex_unlock(&cdev_mutex);
222
mutex_unlock(&cdev_mutex);
223
log_debug(1 << CXGBI_DBG_DEV,
224
"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);

void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun,
		unsigned int max_id, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);

/*
 * iSCSI offload -
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */
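/*
 * Worked example (illustrative numbers): with sport_base = 1024 and
 * max_connect = 8, port map entry idx maps to source port 1024 + idx, so
 * connections are handed ports 1024..1031.  pmap->next remembers where the
 * last allocation happened and the rotor below resumes from there, wrapping
 * at max_connect.
 */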
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			   cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->saddr.sin_port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(csk->saddr.sin_port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			csk->saddr.sin_port =
				htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}

static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	if (csk->saddr.sin_port) {
		unsigned int port = ntohs(csk->saddr.sin_port);
		int idx = port - pmap->sport_base;

		csk->saddr.sin_port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				port);
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}

/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}

static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, 0);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}

static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	if (daddr->sin_family != AF_INET) {
		pr_info("address family 0x%x NOT supported.\n",
			daddr->sin_family);
		err = -EAFNOSUPPORT;
		goto err_out;
	}

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			daddr->sin_addr.s_addr, daddr->sin_port);
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	ndev = dst_get_neighbour(dst)->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENOSPC;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;

	return csk;

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}

void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			    unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);

static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_CONN_FAILED);
		read_unlock_bh(&csk->callback_lock);
	}
}

void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
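
/*
 * State sketch (derived from the handlers below, illustrative only): an
 * active close moves CTP_ESTABLISHED -> CTP_ACTIVE_CLOSE -> CTP_CLOSE_WAIT_1
 * -> closed; a peer-initiated close moves CTP_ESTABLISHED ->
 * CTP_PASSIVE_CLOSE -> CTP_CLOSE_WAIT_2 -> closed; an abort from either
 * side funnels through CTP_ABORTING into cxgbi_sock_closed().
 */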

static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}

void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
			csk, csk->state, csk->flags,
			&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
			&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
			errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);

void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);

void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
			cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
		else {
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
			if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
				pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
					csk, csk->state, csk->flags, csk->tid);
			cxgbi_sock_closed(csk);
		}
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);

void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);

void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);

void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);

static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}
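/*
 * Example (illustrative mtu table; the real table is device-provided): with
 * cdev->mtus = {576, 1500, 9000} a path mtu of 4000 selects index 1 (1500),
 * the largest entry that still fits.  cxgbi_sock_select_mss() below then
 * derives the advertised mss from the chosen entry minus 40 bytes of
 * IP + TCP header.
 */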

unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);

void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);

void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);

static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, cdev->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
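
/*
 * Flow-control sketch (derived from cxgbi_sock_send_pdus() above): the
 * socket refuses new PDUs with -ENOBUFS once write_seq - snd_una, i.e. the
 * bytes queued but not yet acked by the h/w, reaches cdev->snd_win;
 * cxgbi_sock_rcv_wr_ack() advances snd_una and re-opens the pipe via
 * cxgbi_conn_tx_open().
 */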

/*
 * Direct Data Placement -
 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
 * final destination host-memory buffers based on the Initiator Task Tag (ITT)
 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into ddp tag which is used as
 * the base for ITT/TTT.
 */
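/*
 * Illustrative sketch (derived from ddp_tag_reserve() below): a ddp tag is
 * built as
 *
 *	tag = cxgbi_ddp_tag_base(tformat, sw_tag) | (idx << PPOD_IDX_SHIFT);
 *
 * i.e. the pagepod index 'idx' sits in the reserved field of the tag while
 * the initiator's sw tag (itt plus session age) occupies the non-reserved
 * bits, so the tag doubles as the ITT/TTT seen on the wire.
 */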

static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

static unsigned char sw_tag_idx_bits;
static unsigned char sw_tag_age_bits;

/*
 * Direct-Data Placement page size adjustment
 */
static int ddp_adjust_page_table(void)
{
	int i;
	unsigned int base_order, order;

	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
		pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
			PAGE_SIZE, 1UL << ddp_page_shift[0]);
		return -EINVAL;
	}

	base_order = get_order(1UL << ddp_page_shift[0]);
	order = get_order(1UL << PAGE_SHIFT);

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		/* first is the kernel page size, then just doubling */
		ddp_page_order[i] = order - base_order + i;
		ddp_page_shift[i] = PAGE_SHIFT + i;
	}
	return 0;
}

static int ddp_find_page_index(unsigned long pgsz)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == (1UL << ddp_page_shift[i]))
			return i;
	}
	pr_info("ddp page size %lu not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}

static void ddp_setup_host_page_size(void)
{
	if (page_idx == DDP_PGIDX_MAX) {
		page_idx = ddp_find_page_index(PAGE_SIZE);

		if (page_idx == DDP_PGIDX_MAX) {
			pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
			if (ddp_adjust_page_table() < 0) {
				pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
				return;
			}
			page_idx = ddp_find_page_index(PAGE_SIZE);
		}
		pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
	}
}

void cxgbi_ddp_page_size_factor(int *pgsz_factor)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++)
		pgsz_factor[i] = ddp_page_order[i];
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);

/*
 * DDP setup & teardown
 */

void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
			struct cxgbi_pagepod_hdr *hdr,
			struct cxgbi_gather_list *gl, unsigned int gidx)
{
	int i;

	memcpy(ppod, hdr, sizeof(*hdr));
	for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
		ppod->addr[i] = gidx < gl->nelem ?
			cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);

void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
{
	memset(ppod, 0, sizeof(*ppod));
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
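
/*
 * Note (illustrative, based on the loop above): each pagepod carries
 * PPOD_PAGES_MAX + 1 address slots, and gidx keeps advancing across
 * consecutive pods, so the extra slot duplicates the first page of the
 * next pod; slots past gl->nelem are cleared to 0.  The overlap is assumed
 * to let the h/w serve a payload straddling two pods without another
 * lookup.
 */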

static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
					unsigned int start, unsigned int max,
					unsigned int count,
					struct cxgbi_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count) {
		log_debug(1 << CXGBI_DBG_DDP,
			"NOT enough entries %u+%u < %u.\n", start, count, max);
		return -EBUSY;
	}

	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		i += j + 1;
	}
	spin_unlock(&ddp->map_lock);
	log_debug(1 << CXGBI_DBG_DDP,
		"NO suitable entries %u available.\n", count);
	return -EBUSY;
}

static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
				      int start, int count)
{
	spin_lock(&ddp->map_lock);
	memset(&ddp->gl_map[start], 0,
		count * sizeof(struct cxgbi_gather_list *));
	spin_unlock(&ddp->map_lock);
}

static inline void ddp_gl_unmap(struct pci_dev *pdev,
				struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++)
		dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
				PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
			log_debug(1 << CXGBI_DBG_DDP,
				"page %d 0x%p, 0x%p dma mapping err.\n",
				i, gl->pages[i], pdev);
			goto unmap;
		}
	}
	return i;

unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -EINVAL;
}

static void ddp_release_gl(struct cxgbi_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}

static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
					     struct scatterlist *sgl,
					     unsigned int sgcnt,
					     struct pci_dev *pdev,
					     gfp_t gfp)
{
	struct cxgbi_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
				PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u < threshold %u, no ddp.\n",
			xferlen, DDP_THRESHOLD);
		return NULL;
	}

	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
		     npages * (sizeof(dma_addr_t) +
		     sizeof(struct page *)), gfp);
	if (!gl) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u, %u pages, OOM.\n", xferlen, npages);
		return NULL;
	}

	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
	     i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) || ((i != sgcnt - 1) &&
			    ((sglen + sgoffset) & ~PAGE_MASK))) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, %u + %u.\n",
					i, sgcnt, sgoffset, sglen);
				goto error_out;
			}

			j += sglen >> PAGE_SHIFT;
			if (j == gl->nelem || sg->offset) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, offset %u.\n",
					j, gl->nelem, sg->offset);
				goto error_out;
			}
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	u32 idx;

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
				tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		log_debug(1 << CXGBI_DBG_DDP,
			"tag 0x%x, release idx %u, npods %u.\n",
			tag, idx, npods);
		cdev->csk_ddp_clear(chba, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		ddp_release_gl(gl, ddp->pdev);
	} else
		pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
}

static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
			   u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
			   gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 tag;

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
							npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
							ddp->nppods, npods, gl);
		if (idx < 0 && ddp->idx_last >= npods) {
			idx = ddp_find_unused_entries(ddp, 0,
				min(ddp->idx_last + npods, ddp->nppods),
							npods, gl);
		}
	}
	if (idx < 0) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xferlen %u, gl %u, npods %u NO DDP.\n",
			gl->length, gl->nelem, npods);
		return idx;
	}

	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.max_offset = htonl(gl->length);
	hdr.page_offset = htonl(gl->offset);

	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
	if (err < 0)
		goto unmark_entries;

	ddp->idx_last = idx;
	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
		gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
		npods);
	*tagp = tag;
	return 0;

unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}

int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
			unsigned int sw_tag, unsigned int xferlen,
			struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_gather_list *gl;
	int err;

	if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
	    xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
		return -EINVAL;
	}

	if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
		log_debug(1 << CXGBI_DBG_DDP,
			"sw_tag 0x%x NOT usable.\n", sw_tag);
		return -EINVAL;
	}

	gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
	if (!gl)
		return -ENOMEM;

	err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
	if (err < 0)
		ddp_release_gl(gl, cdev->pdev);

	return err;
}

static void ddp_destroy(struct kref *kref)
{
	struct cxgbi_ddp_info *ddp = container_of(kref,
						struct cxgbi_ddp_info,
						refcnt);
	struct cxgbi_device *cdev = ddp->cdev;
	int i = 0;

	pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);

	while (i < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
			kfree(gl);
			i += npods;
		} else
			i++;
	}
	cxgbi_free_big_mem(ddp);
}

int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
	cdev->ddp = NULL;
	if (ddp)
		return kref_put(&ddp->refcnt, ddp_destroy);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);

int cxgbi_ddp_init(struct cxgbi_device *cdev,
		   unsigned int llimit, unsigned int ulimit,
		   unsigned int max_txsz, unsigned int max_rxsz)
{
	struct cxgbi_ddp_info *ddp;
	unsigned int ppmax, bits;

	ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
				ppmax * (sizeof(struct cxgbi_gather_list *) +
					 sizeof(struct sk_buff *)),
				GFP_KERNEL);
	if (!ddp) {
		pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
		return -ENOMEM;
	}
	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
	cdev->ddp = ddp;

	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->cdev = cdev;
	ddp->pdev = cdev->pdev;
	ddp->llimit = llimit;
	ddp->ulimit = ulimit;
	ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
	cdev->tag_format.rsvd_bits = ddp->idx_bits;
	cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
	cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;

	pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev->ports[0]->name, cdev->tag_format.sw_bits,
		cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
		cdev->tag_format.rsvd_mask);

	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	log_debug(1 << CXGBI_DBG_DDP,
		"%s max payload size: %u/%u, %u/%u.\n",
		cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
		cdev->rx_max_size, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
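
/*
 * Worked example (illustrative numbers, assuming 64-byte pagepods, i.e.
 * PPOD_SIZE_SHIFT == 6): 1MB of pagepod memory gives ppmax = 16384, so
 * bits = 15 and the usable pod count becomes (1 << 14) - 1 = 16383;
 * idx_mask is then 0x7fff and rsvd_tag_mask covers the low
 * bits + PPOD_IDX_SHIFT bits of a tag.
 */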

/*
 * APIs interacting with open-iscsi libraries
 */

static unsigned char padding[4];

static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 tag = ntohl((__force u32)hdr_itt);

	log_debug(1 << CXGBI_DBG_DDP,
		   "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgbi_is_ddp_tag(tformat, tag))
		ddp_tag_release(chba, tag);
}

static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag = 0;
	int err = -EINVAL;

	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
		err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
					scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					GFP_ATOMIC);
		if (err < 0)
			log_debug(1 << CXGBI_DBG_DDP,
				"csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				cconn->cep->csk, task, scsi_in(sc)->length,
				scsi_in(sc)->table.nents);
	}

	if (err < 0)
		tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
	/* the itt needs to be sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}

void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	u32 tag = ntohl((__force u32) itt);
	u32 sw_bits;

	sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
	if (idx)
		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
	if (age)
		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
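
/*
 * Decode sketch (illustrative): with cconn->task_idx_bits = 7, a sw tag of
 * 0x1a5 parses as idx = 0x1a5 & 0x7f = 0x25 and
 * age = (0x1a5 >> 7) & ISCSI_AGE_MASK = 0x3, mirroring how
 * task_reserve_itt() packed (sess->age << task_idx_bits) | task->itt.
 */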

void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
{
	struct iscsi_conn *conn = csk->user_data;

	if (conn) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"csk 0x%p, cid %d.\n", csk, conn->id);
		iscsi_conn_queue_work(conn);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);

/*
 * pdu receive, interact with libiscsi_tcp
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			  skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}

static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	return read_pdu_skb(conn, skb, 0, 0);
}

static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
			     struct sk_buff *skb, unsigned int offset)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = 0;
	int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (conn->datadgst_en &&
	    cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
			conn, lskb, cxgbi_skcb_flags(lskb));
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	/* coalesced, add header digest length */
	if (lskb == skb && conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
		offloaded = 1;

	if (opcode == ISCSI_OP_SCSI_DATA_IN)
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
			skb, opcode, ntohl(tcp_conn->in.hdr->itt),
			tcp_conn->in.datalen, offloaded ? "is" : "not");

	return read_pdu_skb(conn, skb, offset, offloaded);
}

static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
{
	struct cxgbi_device *cdev = csk->cdev;
	int must_send;
	u32 credits;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
		csk->rcv_wup, cdev->rx_credit_thres,
		cdev->rcv_win);

	if (csk->state != CTP_ESTABLISHED)
		return;

	credits = csk->copied_seq - csk->rcv_wup;
	if (unlikely(!credits))
		return;
	if (unlikely(cdev->rx_credit_thres == 0))
		return;

	must_send = credits + 16384 >= cdev->rcv_win;
	if (must_send || credits >= cdev->rx_credit_thres)
		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}
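
/*
 * Example (illustrative): with a 256KB rcv_win, "must_send" kicks in once
 * the unreturned bytes come within 16KB of the window, i.e. credits >=
 * 240KB, regardless of rx_credit_thres; otherwise credits are only
 * returned in rx_credit_thres sized batches to limit credit-return
 * messages to the h/w.
 */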

void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n", csk, conn);

	if (unlikely(!conn || conn->suspend_rx)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->suspend_rx : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n",
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);

static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
				unsigned int offset, unsigned int *off,
				struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}

static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
				unsigned int dlen, struct page_frag *frags,
				int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}
		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}

int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	     (opcode == ISCSI_OP_SCSI_CMD &&
	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data could go into the skb head */
		headroom += min_t(unsigned int,
				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
				conn->max_xmit_dlength);

	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
			cdev->skb_tx_rsvd, headroom, opcode);
		return -ENOMEM;
	}

	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);

static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	if (hcrc || dcrc) {
		u8 submode = 0;

		if (hcrc)
			submode |= 1;
		if (dcrc)
			submode |= 2;
		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
	} else
		cxgbi_skcb_ulp_mode(skb) = 0;
}
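
/*
 * Submode sketch (derived from tx_skb_setmode() above): bit 0 of the
 * submode requests header CRC and bit 1 data CRC, so the ulp_mode byte
 * ends up as (ULP2_MODE_ISCSI << 4) | 0x0..0x3 -- e.g. 0x23 with both
 * digests enabled, assuming ULP2_MODE_ISCSI is 2 as in the cxgb headers.
 */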

int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			      unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(
					sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			struct page_frag *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++) {
				__skb_fill_page_desc(skb, i,
						tdata->frags[i].page,
						tdata->frags[i].offset,
						tdata->frags[i].size);
				skb_frag_ref(skb, i);
			}
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				virt_to_page(padding), offset_in_page(padding),
				padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);

int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen;
	int err;

	if (!skb) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb NULL.\n", task);
		return 0;
	}

	datalen = skb->data_len;
	tdata->skb = NULL;
	err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
	if (err > 0) {
		int pdulen = err;

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
			task, task->sc, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
			task, skb, skb->len, skb->data_len, err);
		/* reset skb to send when we are called again */
		tdata->skb = skb;
		return err;
	}

	kfree_skb(skb);
	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
		task->itt, skb, skb->len, skb->data_len, err);
	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);

void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(*tdata));

	task_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);

void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
				struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
}
EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);

static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}

static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
				conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}

int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
			enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_sock *csk = cconn->cep->csk;
	int value, err = 0;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
		cls_conn, param, buflen, buf);

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->hdrdgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->datadgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (value <= 0 || !is_power_of_2(value))
			return -EINVAL;
		if (session->max_r2t == value)
			break;
		iscsi_tcp_r2tpool_free(session);
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && iscsi_tcp_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_recv_dlength(conn);
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_xmit_dlength(conn);
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);

int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
		       char *buf)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d.\n", ep, param);

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!cep)
			return -ENOTCONN;

		csk = cep->csk;
		if (!csk)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &csk->daddr, param, buf);
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);

struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct cxgbi_conn *cconn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	cconn = tcp_conn->dd_data;
	cconn->iconn = conn;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
		cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);

	return cls_conn;
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);

int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
				struct iscsi_cls_conn *cls_conn,
				u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);

	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);

struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
						u16 cmds_max, u16 qdepth,
						u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					cmds_max, 0,
					sizeof(struct iscsi_tcp_task) +
					sizeof(struct cxgbi_task_data),
					initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);

void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls sess 0x%p.\n", cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);

int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);

int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d.\n",
		shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr;

		addr = cxgbi_get_iscsi_ipv4(chba);
		len = sprintf(buf, "%pI4", &addr);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);

struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	csk = cxgbi_check_route(dst_addr);
	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u, "
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);

int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk = cep->csk;

	if (!cxgbi_sock_is_established(csk))
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);

void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);

int cxgbi_iscsi_init(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
			itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		"%s, registered iscsi transport 0x%p.\n",
		itp->name, itp);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);

void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	if (*stt) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			"de-register transport 0x%p, %s, stt 0x%p.\n",
			itp, itp->name, *stt);
		*stt = NULL;
		iscsi_unregister_transport(itp);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);

mode_t cxgbi_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);

static int __init libcxgbi_init_module(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;

	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
		ISCSI_ITT_MASK, sw_tag_idx_bits,
		ISCSI_AGE_MASK, sw_tag_age_bits);

	ddp_setup_host_page_size();
	return 0;
}

static void __exit libcxgbi_exit_module(void)
{
	cxgbi_device_unregister_all(0xFF);
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);