/*
 * NOTE(review): this chunk is diff/patch residue of a kernel driver
 * (iWARP CM for the Chelsio T3, cxgb3).  The bare numeric lines
 * interleaved below are diff line-number artifacts from extraction,
 * not code.  Code text is preserved byte-identical; only comments
 * were added.
 */
/* Module-scope declarations for the CM event path. */
101
102
/* Congestion-control flavor index handed to the HW via the opt2 words below. */
module_param(cong_flavor, uint, 0644);
102
103
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
104
/* Deferred-work entry point; drains rxq (see process_work below). */
static void process_work(struct work_struct *work);
105
105
static struct workqueue_struct *workq;
106
static DECLARE_WORK(skb_work, process_work);
108
107
/* Queue of CPL skbs consumed by process_work(). */
static struct sk_buff_head rxq;
109
/* Per-CPL-opcode handler table; filled at runtime in iwch_cm_init() below. */
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
111
109
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
112
110
static void ep_timeout(unsigned long arg);
301
299
put_ep(&ep->com);
304
/*
 * process_work - fragment; braces and several body lines are missing
 * from this extraction.  Visible logic: drain the rxq skb queue and
 * dispatch each skb to the per-opcode handler.  The ep and t3cdev
 * pointers are read back out of skb->cb, and the CPL opcode out of
 * skb->csum (presumably stashed there by sched() before queueing -
 * confirm against the full file).
 */
static void process_work(struct work_struct *work)
306
struct sk_buff *skb = NULL;
311
while ((skb = skb_dequeue(&rxq))) {
312
ep = *((void **) (skb->cb));
313
tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
314
ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
315
if (ret & CPL_RET_BUF_DONE)
319
* ep was referenced in sched(), and is freed here.
321
/* Drop the reference taken when the skb was queued. */
put_ep((struct iwch_ep_common *)ep);
325
302
/*
 * status2errno - fragment; presumably maps a CPL status code to a
 * Linux errno, but the switch body is not visible in this extraction.
 */
static int status2errno(int status)
327
304
switch (status) {
/*
 * NOTE(review): the fragments below come from several different
 * functions; for some statements BOTH the pre-patch and post-patch
 * diff variants appear side by side - only one variant belongs in
 * the real file.
 */
486
463
V_MSS_IDX(mtu_idx) |
487
464
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
488
465
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
489
/* Old variant of the opt2 assignment: */
opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
466
/* New variant: additionally marks the RX coalescing field valid and zero. */
opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
467
V_CONG_CONTROL_FLAVOR(cong_flavor);
490
468
skb->priority = CPL_PRIORITY_SETUP;
491
469
/* Install ARP-failure cleanup for this request skb. */
set_arp_failure_handler(skb, act_open_req_arp_failure);
1115
1093
PDBG("%s ep %p credits %u\n", __func__, ep, credits);
1117
1095
if (credits == 0) {
1118
/* Old variant (KERN_ERR level inside the debug macro): */
PDBG(KERN_ERR "%s 0 credit ack ep %p state %u\n",
1119
__func__, ep, state_read(&ep->com));
1096
/* New variant (plain debug message): */
PDBG("%s 0 credit ack ep %p state %u\n",
1097
__func__, ep, state_read(&ep->com));
1120
1098
/* Nothing further to do for a 0-credit ack; let the caller free the buffer. */
return CPL_RET_BUF_DONE;
1303
1281
V_MSS_IDX(mtu_idx) |
1304
1282
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1305
1283
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1306
/* Old variant of the opt2 assignment: */
opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
1284
/* New variant: additionally marks the RX coalescing field valid and zero. */
opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1285
V_CONG_CONTROL_FLAVOR(cong_flavor);
1308
1287
rpl = cplhdr(skb);
1309
1288
rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1371
1350
tim.mac_addr = req->dst_mac;
1372
1351
tim.vlan_tag = ntohs(req->vlan_tag);
1373
1352
/* Ask the core driver to resolve the netdev from the dst MAC/VLAN. */
if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
1375
/* Old variant: byte-by-byte MAC format string. */
"%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
1353
/* New variant: %pM prints the 6-byte MAC with one specifier. */
printk(KERN_ERR "%s bad dst mac %pM\n",
1354
__func__, req->dst_mac);
2165
2137
* All the CM events are handled on a work queue to have a safe context.
2138
* These are the real handlers that are called from the work queue.
2140
/* Post-patch form: dispatch table built statically and made const. */
static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
2141
[CPL_ACT_ESTABLISH] = act_establish,
2142
[CPL_ACT_OPEN_RPL] = act_open_rpl,
2143
[CPL_RX_DATA] = rx_data,
2144
[CPL_TX_DMA_ACK] = tx_ack,
2145
[CPL_ABORT_RPL_RSS] = abort_rpl,
2146
[CPL_ABORT_RPL] = abort_rpl,
2147
[CPL_PASS_OPEN_RPL] = pass_open_rpl,
2148
[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2149
[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2150
[CPL_PASS_ESTABLISH] = pass_establish,
2151
[CPL_PEER_CLOSE] = peer_close,
2152
[CPL_ABORT_REQ_RSS] = peer_abort,
2153
[CPL_CLOSE_CON_RPL] = close_con_rpl,
2154
[CPL_RDMA_TERMINATE] = terminate,
2155
[CPL_RDMA_EC_STATUS] = ec_status,
/* NOTE(review): the table's closing "};" is missing from this extraction. */
2158
/*
 * process_work - fragment (braces and some body lines missing).
 * Drains rxq and dispatches each skb by the CPL opcode read from
 * skb->csum; the ep and t3cdev pointers are read back from skb->cb.
 */
static void process_work(struct work_struct *work)
2160
struct sk_buff *skb = NULL;
2162
struct t3cdev *tdev;
2165
while ((skb = skb_dequeue(&rxq))) {
2166
ep = *((void **) (skb->cb));
2167
tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
2168
ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
2169
if (ret & CPL_RET_BUF_DONE)
2173
* ep was referenced in sched(), and is freed here.
2175
/* Drop the reference taken when the skb was queued. */
put_ep((struct iwch_ep_common *)ep);
2179
static DECLARE_WORK(skb_work, process_work);
2167
2181
/*
 * sched - fragment; most body lines are missing.  Per the table comment
 * below, this is the upcall target that queues CPL skbs for deferred
 * processing on the work queue, returning CPL_RET_BUF_DONE.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2169
2183
struct iwch_ep_common *epc = ctx;
2195
2209
return CPL_RET_BUF_DONE;
2213
* All upcalls from the T3 Core go to sched() to schedule the
2214
* processing on a work queue.
2216
/* Post-patch form: built statically; SET_TCB_RPL is handled inline. */
cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
2217
[CPL_ACT_ESTABLISH] = sched,
2218
[CPL_ACT_OPEN_RPL] = sched,
2219
[CPL_RX_DATA] = sched,
2220
[CPL_TX_DMA_ACK] = sched,
2221
[CPL_ABORT_RPL_RSS] = sched,
2222
[CPL_ABORT_RPL] = sched,
2223
[CPL_PASS_OPEN_RPL] = sched,
2224
[CPL_CLOSE_LISTSRV_RPL] = sched,
2225
[CPL_PASS_ACCEPT_REQ] = sched,
2226
[CPL_PASS_ESTABLISH] = sched,
2227
[CPL_PEER_CLOSE] = sched,
2228
[CPL_CLOSE_CON_RPL] = sched,
2229
[CPL_ABORT_REQ_RSS] = sched,
2230
[CPL_RDMA_TERMINATE] = sched,
2231
[CPL_RDMA_EC_STATUS] = sched,
2232
[CPL_SET_TCB_RPL] = set_tcb_rpl,
/* NOTE(review): the table's closing "};" is missing from this extraction. */
2198
2235
/*
 * iwch_cm_init - fragment; the function braces and several lines are
 * missing from this extraction.  Visible logic: initialise the rxq skb
 * queue and, in the pre-patch form, fill the t3c_handlers and
 * work_handlers dispatch tables at runtime.
 */
int __init iwch_cm_init(void)
2200
2237
skb_queue_head_init(&rxq);
2204
2241
/* NOTE(review): the condition guarding this -ENOMEM return is not
 * visible here - presumably a failed workqueue allocation; confirm. */
return -ENOMEM;
2207
* All upcalls from the T3 Core go to sched() to
2208
* schedule the processing on a work queue.
2210
/* Pre-patch form: runtime initialisation of the upcall dispatch table. */
t3c_handlers[CPL_ACT_ESTABLISH] = sched;
2211
t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
2212
t3c_handlers[CPL_RX_DATA] = sched;
2213
t3c_handlers[CPL_TX_DMA_ACK] = sched;
2214
t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
2215
t3c_handlers[CPL_ABORT_RPL] = sched;
2216
t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
2217
t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
2218
t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
2219
t3c_handlers[CPL_PASS_ESTABLISH] = sched;
2220
t3c_handlers[CPL_PEER_CLOSE] = sched;
2221
t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
2222
t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
2223
t3c_handlers[CPL_RDMA_TERMINATE] = sched;
2224
t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
2225
t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
2228
* These are the real handlers that are called from a
2231
/* Pre-patch form: runtime initialisation of the work-queue handler table. */
work_handlers[CPL_ACT_ESTABLISH] = act_establish;
2232
work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
2233
work_handlers[CPL_RX_DATA] = rx_data;
2234
work_handlers[CPL_TX_DMA_ACK] = tx_ack;
2235
work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
2236
work_handlers[CPL_ABORT_RPL] = abort_rpl;
2237
work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
2238
work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
2239
work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
2240
work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
2241
work_handlers[CPL_PEER_CLOSE] = peer_close;
2242
work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
2243
work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
2244
work_handlers[CPL_RDMA_TERMINATE] = terminate;
2245
work_handlers[CPL_RDMA_EC_STATUS] = ec_status;