/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include "cvmx-helper.h"
#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pow.h"
#include "cvmx-pip.h"
#include "cvmx-scratch.h"

#include "cvmx-gmxx-defs.h"

struct cvm_napi_wrapper {
	struct napi_struct napi;
} ____cacheline_aligned_in_smp;

static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;

struct cvm_oct_core_state {
	int baseline_cores;
	/*
	 * The number of additional cores that could be processing
	 * input packets.
	 */
	atomic_t available_cores;
	cpumask_t cpu_state;
} ____cacheline_aligned_in_smp;

static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
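
/*
 * A note on the accounting above (a sketch, not normative):
 * baseline_cores is the maximum number of CPUs allowed to poll for RX,
 * available_cores counts how many of those are currently idle, and
 * cpu_state marks which CPUs have an active NAPI instance. For example,
 * with four online CPUs and max_rx_cpus unset, baseline_cores starts at
 * 4 and available_cores drops toward 0 as the backlog pulls more CPUs
 * into polling.
 */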

static void cvm_oct_enable_napi(void *_)
{
	int cpu = smp_processor_id();

	napi_schedule(&cvm_oct_napi[cpu].napi);
}
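
/*
 * cvm_oct_enable_napi runs either in the interrupt handler on the local
 * CPU or via smp_call_function_single() on a remote CPU, so
 * smp_processor_id() always selects the NAPI instance of the CPU that
 * will do the polling.
 */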

static void cvm_oct_enable_one_cpu(void)
{
	int v;
	int cpu;

	/* Check to see if more CPUs are available for receive processing... */
	v = atomic_sub_if_positive(1, &core_state.available_cores);
	if (v < 0)
		return;

	/* ... if a CPU is available, turn on NAPI polling for that CPU. */
	for_each_online_cpu(cpu) {
		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
						     NULL, 0);
			if (v)
				panic("Can't enable NAPI.");
			break;
		}
	}
}

static void cvm_oct_no_more_work(void)
{
	int cpu = smp_processor_id();

	/*
	 * CPU zero is special. It always has the irq enabled when
	 * waiting for incoming packets.
	 */
	if (cpu == 0) {
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
		return;
	}

	cpu_clear(cpu, core_state.cpu_state);
	atomic_add(1, &core_state.available_cores);
}
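
/*
 * The asymmetry is deliberate: CPU 0 (presumably where the POW IRQ is
 * routed) re-arms the interrupt and stays the CPU woken first by
 * cvm_oct_do_interrupt(), while every other CPU simply returns itself
 * to the pool of available cores.
 */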

/**
 * cvm_oct_do_interrupt - interrupt handler.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	cvm_oct_enable_napi(NULL);

	return IRQ_HANDLED;
}
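
/*
 * This is the standard NAPI handshake: mask the interrupt source, let
 * the poll loop drain packets, and re-enable the IRQ only when
 * cvm_oct_napi_poll() runs out of work (see cvm_oct_no_more_work()).
 */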

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (USE_10MBPS_PREAMBLE_WORKAROUND
		   && ((work->word2.snoip.err_code == 5)
		       || (work->word2.snoip.err_code == 7))) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip the run of 0x55 preamble bytes. */
			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
				/* Shift the whole packet left by one nibble. */
				for (i = 0; i < work->len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   work->ipprt);
				cvmx_helper_dump_packet(work);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
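
/*
 * A worked example of the nibble shift above (illustrative only): if
 * the bytes after the 0x55 run are 0x5d 0x12 0x34, the start-of-frame
 * nibble 0xd sits in the low half of the first byte. Rebuilding each
 * byte from its own high nibble and the next byte's low nibble yields
 * 0x25 0x41 ..., realigning the frame on a byte boundary.
 */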

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}
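
	/*
	 * From here on, work requests are pipelined: while packet N is
	 * being processed, the asynchronous IOBDMA request for packet N+1
	 * is already in flight, and its result is picked up from the
	 * scratch area at the top of the next loop iteration.
	 */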
	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (work == NULL) {
			union cvmx_pow_wq_int wq_int;

			wq_int.u64 = 0;
			wq_int.s.iq_dis = 1 << pow_receive_group;
			wq_int.s.wq_int = 1 << pow_receive_group;
			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			break;
		}
		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}

		if (rx_count == 0) {
			/*
			 * First time through, see if there is enough
			 * work waiting to merit waking another
			 * CPU.
			 */
			union cvmx_pow_wq_int_cntx counts;
			int backlog;
			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);

			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
			if (backlog > budget * cores_in_use && napi != NULL)
				cvm_oct_enable_one_cpu();
		}
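
		/*
		 * Rough intuition for the wake-up test above: each polling
		 * CPU can retire at most `budget` packets per pass, so a
		 * backlog larger than budget * cores_in_use means the
		 * current set of CPUs cannot catch up within one pass.
		 * With budget = 64 and two CPUs polling, for instance, a
		 * backlog of 129+ triggers cvm_oct_enable_one_cpu().
		 */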

		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				printk_ratelimited("Port %d failed to allocate skbuff, packet dropped\n",
						   work->ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr = work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 * segment_ptr.s.size;
					 */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}
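
		/*
		 * The errata workaround above recovers the segment start by
		 * exploiting 128-byte alignment: ((addr >> 7) - back) << 7
		 * is the beginning of the FPA buffer, so the usable size is
		 * the pool buffer size minus the offset of addr within that
		 * buffer. Illustrative numbers: with addr 0x...2080, back 1
		 * and a 2048-byte pool buffer, the buffer starts at
		 * 0x...2000 and segment_size is 2048 - 0x80 = 1920 bytes.
		 */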

		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
					     work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
				rx_count++;
			} else {
				/* Drop any packet received for a device that isn't up */
				printk_ratelimited("%s: Device not up, packet dropped\n",
						   dev->name);
#ifdef CONFIG_64BIT
				atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   work->ipprt);
			dev_kfree_skb_irq(skb);
		}

		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
				      DONT_WRITEBACK(1));
		} else {
			cvm_oct_free_work(work);
		}
	}
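
	/*
	 * In the zero-copy case the packet buffer has left with the
	 * skbuff, so only the work-queue entry went back to its FPA pool
	 * above; the FAU counter bumped there is what (as far as this
	 * sketch assumes) tells cvm_oct_rx_refill_pool() below how many
	 * replacement packet buffers to allocate.
	 */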
	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work */
		napi_complete(napi);
		cvm_oct_no_more_work();
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets
 * @dev: Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;
	union cvmx_pow_wq_int_thrx int_thr;
	union cvmx_pow_wq_int_pc int_pc;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (NULL == dev_for_napi)
		panic("No net_devices were allocated.");

	if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
		atomic_set(&core_state.available_cores, max_rx_cpus);
	else
		atomic_set(&core_state.available_cores, num_online_cpus());
	core_state.baseline_cores = atomic_read(&core_state.available_cores);

	core_state.cpu_state = CPU_MASK_NONE;
	for_each_possible_cpu(i) {
		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&cvm_oct_napi[i].napi);
	}

	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	int_thr.u64 = 0;
	int_thr.s.tc_en = 1;
	int_thr.s.tc_thr = 1;
	/* Enable POW interrupt when our port has at least one packet */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);

	int_pc.u64 = 0;
	int_pc.s.pc_thr = 5;
	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);

	/* Schedule NAPI now. This will indirectly enable interrupts. */
	cvm_oct_enable_one_cpu();
}
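
/*
 * Initialization ordering note: the IRQ is requested and then
 * immediately disabled so no interrupt fires before the first NAPI
 * pass. cvm_oct_enable_one_cpu() schedules that pass, and when it
 * drains the queue cvm_oct_no_more_work() re-enables the IRQ on CPU 0,
 * closing the loop. int_thr.s.tc_thr = 1 asks the POW for an interrupt
 * as soon as a single work entry is queued for our group.
 */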

void cvm_oct_rx_shutdown(void)
{
	int i;

	/* Shutdown all of the NAPIs */
	for_each_possible_cpu(i)
		netif_napi_del(&cvm_oct_napi[i].napi);
}