~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to drivers/net/sfc/tx.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -205,7 +205,9 @@
 					goto unwind;
 				}
 				smp_mb();
-				netif_tx_start_queue(tx_queue->core_txq);
+				if (likely(!efx->loopback_selftest))
+					netif_tx_start_queue(
+						tx_queue->core_txq);
 			}
 
 			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -336,17 +338,90 @@
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
-
-	if (unlikely(efx->port_inhibited))
-		return NETDEV_TX_BUSY;
-
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	unsigned index, type;
+
+	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+
+	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES +
+				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused.  We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues.  Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
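
The rewritten transmit path above splits the core TX queue index chosen by the stack into a channel and a queue type, and efx_init_tx_queue_core_txq() performs the inverse mapping, as its comment notes. The stand-alone sketch below illustrates that round trip outside the driver. The EFX_TXQ_* constant values and the "queue = channel * EFX_TXQ_TYPES + type" numbering are taken as assumptions from this driver generation's headers; the sketch is illustrative, not part of the change.

    /* Illustrative only -- not part of the driver.  Constant values are
     * assumed to match net_driver.h of this driver generation. */
    #include <assert.h>
    #include <stdio.h>

    #define EFX_TXQ_TYPE_OFFLOAD 1   /* checksum-offload queue */
    #define EFX_TXQ_TYPE_HIGHPRI 2   /* high-priority (TC1) queue */
    #define EFX_TXQ_TYPES        4

    /* Forward lookup, as in efx_hard_start_xmit(): a core queue index at or
     * above n_tx_channels selects the high-priority bank of queues. */
    static void core_index_to_queue(unsigned core_index, int csum_offload,
                                    unsigned n_tx_channels,
                                    unsigned *channel, unsigned *type)
    {
            *type = csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0;
            *channel = core_index;
            if (*channel >= n_tx_channels) {
                    *channel -= n_tx_channels;
                    *type |= EFX_TXQ_TYPE_HIGHPRI;
            }
    }

    /* Inverse lookup, as in efx_init_tx_queue_core_txq(): dividing the
     * driver queue number by EFX_TXQ_TYPES recovers the channel, and the
     * HIGHPRI bit selects the second bank of core queues. */
    static unsigned queue_to_core_index(unsigned queue, unsigned n_tx_channels)
    {
            return queue / EFX_TXQ_TYPES +
                   ((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
    }

    int main(void)
    {
            const unsigned n_tx_channels = 4;   /* example value */
            unsigned core, channel, type;

            /* Round-trip every core queue index the stack can pick once
             * 2 * n_tx_channels real TX queues are exposed. */
            for (core = 0; core < 2 * n_tx_channels; core++) {
                    unsigned queue;

                    core_index_to_queue(core, 1, n_tx_channels, &channel, &type);
                    queue = channel * EFX_TXQ_TYPES + type;
                    assert(queue_to_core_index(queue, n_tx_channels) == core);
            }
            printf("forward and inverse queue lookups agree\n");
            return 0;
    }

With only two traffic classes, exposing 2 * n_tx_channels core queues (the second half high-priority) keeps the lookup pure arithmetic, which is also why efx_setup_tc() sets tc_to_txq[tc].offset to tc * n_tx_channels with a count of n_tx_channels per class.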
@@ -361,7 +436,8 @@
 	 * queue state. */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-	    likely(efx->port_enabled)) {
+	    likely(efx->port_enabled) &&
+	    likely(netif_device_present(efx->net_dev))) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
@@ -430,6 +506,8 @@
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +530,14 @@
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
 
@@ -466,6 +549,9 @@
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);