/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
#include <linux/tcp.h>
34
#include <linux/udp.h>
36
#include <linux/list.h>
37
#include <linux/slab.h>
38
#include <linux/if_ether.h>
39
#include <linux/notifier.h>
40
#include <linux/reboot.h>
41
#include <linux/memory.h>
42
#include <asm/kexec.h>
43
#include <linux/mutex.h>
44
#include <linux/prefetch.h>
50
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

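/*
 * Walk all adapters and their active ports and snapshot every firmware
 * handle (QPs, CQs, EQs, memory regions) into ehea_fw_handles. Two passes:
 * count first, then fill a freshly allocated array, so that the existing
 * array can be kept when the allocation fails. The snapshot is presumably
 * consumed by the crash/reboot notifiers of this driver, which must free
 * the handles without traversing live driver state.
 */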
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

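/*
 * Rebuild the flat snapshot of all broadcast/multicast registrations:
 * two entries per active port for broadcast (untagged and all-VLAN) and
 * two per registered multicast group. Runs under ehea_bcmc_regs.lock,
 * hence the GFP_ATOMIC allocation.
 */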
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes   += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes   += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
	return stats;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}

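/*
 * Replenish receive queue 1 with skbs for the low-latency path. The skb
 * array length is a power of two, so the ring index simply wraps via
 * max_index_mask. Buffers that could not be allocated are carried over
 * in os_skbs and retried on the next refill; while __EHEA_STOP_XFER is
 * set the refill is deferred entirely.
 */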
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

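/*
 * Classify a receive CQE: extract the RQ number from the type field and
 * report whether the completion carries a usable frame. Note that a TCP
 * checksum error on a frame with header_length == 0 is tolerated here;
 * the per-type error counters for real errors are maintained in
 * ehea_treat_poll_error() below.
 */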
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

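/*
 * Account a receive completion that carried an error: bump the per-type
 * error counters, drop the skb that the failed WQE referenced (RQ2/RQ3
 * only), and schedule a port reset if the CQE signals a fatal error.
 * Returns nonzero when a reset was scheduled.
 */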
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb) {
						netdev_err(dev, "Not enough memory to allocate skb\n");
						break;
					}
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

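/*
 * HW/SW send-queue consistency check: post one marker SWQE per queue
 * (wr_id SWQE_RESTART_CHECK, purge + signalled completion). The send
 * completion handler sets sq_restart_flag when the marker comes back,
 * and reset_sq_restart_flag() clears it again and wakes restart_wq.
 * If the marker does not complete within 100 ms, the queues are assumed
 * to be out of sync and the port is reset.
 */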
static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;

		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}

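/*
 * Reap send completions: free the skbs of completed SWQE2 work requests,
 * credit swqe_avail by the refill count encoded in the wr_id, and wake
 * the tx queue if it was stopped and enough send WQEs are available
 * again (re-checked under the tx lock to avoid a wake/stop race).
 */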
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

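/*
 * NAPI poll: process send completions and receive WQEs until the budget
 * is exhausted. When the budget is not used up, completion interrupts
 * are re-armed and both queues are polled once more; if new work raced
 * in, the handler re-schedules itself instead of going idle.
 */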
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);

		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

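/*
 * Decode one asynchronous event queue entry: port state changes update
 * the carrier state and, when prop_carrier_state is set, mirror the
 * physical port state as well. Adapter/port malfunctions are logged,
 * the latter also disabling the tx queue.
 */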
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		pr_err("unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;

	tasklet_hi_schedule(&adapter->neq_tasklet);

	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;


out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

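/*
 * Set up one port resource set: an event queue, send/receive completion
 * queues, the queue pair itself and the skb bookkeeping arrays for the
 * send queue and the three receive queues. The traffic counters are
 * saved across the memset so that a reset does not zero the interface
 * statistics.
 */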
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

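/*
 * Fill the immediate-data area of a type-2 SWQE. For TSO only the
 * Ethernet/IP/TCP headers are copied as immediate data and the remaining
 * linear data is described by sg1entry; small non-GSO frames fit
 * entirely into the immediate area.
 */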
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			/* copy sg entry data */
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (tagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		pr_err("no mem for mcl_entry\n");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (port->promisc) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);

	}
out:
	ehea_update_bcmc_registrations();
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

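/*
 * Two transmit descriptor flavours: ehea_xmit2() builds a type-2 SWQE
 * with scatter/gather descriptors for larger frames, while ehea_xmit3()
 * copies the whole frame into the immediate-data area of a type-3 SWQE
 * and can therefore free the skb right away.
 */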
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

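/*
 * Main transmit path. Frames up to SWQE3_MAX_IMM bytes go out as fully
 * immediate type-3 SWQEs, requesting a signalled completion only every
 * sig_comp_iv frames; larger frames are posted as type-2 SWQEs whose skb
 * is parked in sq_skba until the completion arrives. The queue is
 * stopped when the send queue is nearly exhausted.
 */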
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}

int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "out_free_irqs\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

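/*
 * Quiesce helpers used while the driver transfers to a new memory map:
 * mark all pending send WQEs with the PURGE bit and wait (bounded to
 * 100 ms per queue) for the hardware to hand them back before the
 * shared memory regions are torn down.
 */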
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr =  &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

2625
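/*
 * Counterpart to ehea_stop_qps(): recreate the shared memory regions,
 * patch the posted receive WQEs, re-enable each queue pair with the same
 * query/modify/query sequence, and refill all three receive queues.
 */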
int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}
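/*
 * Invoked on DLPAR memory add/remove: quiesce all active ports, swap the
 * per-adapter memory region for one matching the new memory layout, then
 * restart the ports. __EHEA_STOP_XFER keeps the fast path off the old
 * region while this runs.
 */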
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	int ret;
	u64 hret;
	struct hcp_query_ehea *cb;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}
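/*
 * Report whether jumbo frames are active on the physical port; if they are
 * not, try to enable them through port control block 4. *jumbo reflects
 * the state after the attempt.
 */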
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO, cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	free_page((unsigned long)cb4);
out:
	return ret;
}
static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);
static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}
static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}
static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};
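/*
 * Allocate and wire up one net_device per logical port: a multiqueue
 * etherdev with EHEA_MAX_PORT_RES queues, an of_device entry in sysfs,
 * worker setup for resets and statistics, and a final jumbo frame probe.
 * Returns the new port, or NULL on failure.
 */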
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);

	if (!dev) {
		pr_err("no mem for net_device\n");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO
		      | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	memset(&port->stats, 0, sizeof(struct net_device_stats));
	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",
			       eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}
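/*
 * The probe_port/remove_port attributes below let an administrator add or
 * remove logical ports at runtime by writing a logical port id to the
 * adapter's sysfs directory, e.g. (the exact path depends on the ibmebus
 * device name and is illustrative only):
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */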
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}
static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);


	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();

	return ret;
}
static int __devexit ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}
void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}
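/*
 * Memory hotplug notifier: keep the driver's section bitmap in sync with
 * the LPAR's memory layout and trigger the memory region swap via
 * ehea_rereg_mrs(). dlpar_mem_lock serializes this against the port reset
 * worker, which takes the same mutex.
 */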
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled");
		/* Readd canceled memory block */
	case MEM_ONLINE:
		pr_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
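/*
 * Sanity-check the module parameters. Note that only the [min, max] range
 * is enforced here; as documented in the parameter descriptions, queue
 * sizes are expected to be of the form 2^x - 1 with x in [6..14], i.e.
 * 63, 127, 255, ..., 16383.
 */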
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}
static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);
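/*
 * Module bring-up order matters: validate parameters and build the busmap
 * first, then hook the reboot/memory/crash notifiers, and only then
 * register the ibmebus driver so that adapter probing can rely on all of
 * the above being in place.
 */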
int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		pr_info("failed registering reboot notifier\n");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		pr_info("failed registering memory remove notifier\n");

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret)
		pr_info("failed registering crash handler\n");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(ehea_crash_handler);
out:
	return ret;
}
static void __exit ehea_module_exit(void)
{
	int ret;

	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(ehea_crash_handler);
	if (ret)
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);