/*********************************************************
 * Copyright (C) 2009 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 *********************************************************/

/*
 * Shared memory infrastructure for the VMXNET3 Linux driver. Used by the
 * VMXNET3 driver to back its rings with memory from a shared memory
 * pool that is shared with user space.
 */

#include "driver-config.h"

#include "vmxnet3_int.h"
#include "vmnet_def.h"
#include "vm_device_version.h"
#include "vmxnet3_shm.h"

static int
vmxnet3_shm_consume_user_tx_queue(struct vmxnet3_shm_pool *shm);

static int
vmxnet3_shm_tq_xmit(struct sk_buff *skb,
                    struct vmxnet3_tx_queue *tq,
                    struct vmxnet3_adapter *adapter,
                    struct net_device *netdev);

/*
 *----------------------------------------------------------------------------
 * kernel_rx_idx --
 *    Kernel's current shared memory RX ring index
 *----------------------------------------------------------------------------
 */

static inline uint16
kernel_rx_idx(const struct vmxnet3_shm_pool *shm)
{
   return shm->ctl.ptr->kernel_rxi;
}


/*
 *----------------------------------------------------------------------------
 * inc_kernel_rx_idx --
 *    Increment the kernel's shared memory RX ring index
 *----------------------------------------------------------------------------
 */

static inline void
inc_kernel_rx_idx(const struct vmxnet3_shm_pool *shm)
{
   shm->ctl.ptr->kernel_rxi = (shm->ctl.ptr->kernel_rxi + 1) % SHM_RX_RING_SIZE;
}


/*
 *----------------------------------------------------------------------------
 * kernel_tx_idx --
 *    Kernel's current shared memory TX ring index
 *----------------------------------------------------------------------------
 */

static inline uint16
kernel_tx_idx(const struct vmxnet3_shm_pool *shm)
{
   return shm->ctl.ptr->kernel_txi;
}


/*
 *----------------------------------------------------------------------------
 * inc_kernel_tx_idx --
 *    Increment the kernel's shared memory TX ring index
 *----------------------------------------------------------------------------
 */

static inline void
inc_kernel_tx_idx(const struct vmxnet3_shm_pool *shm)
{
   shm->ctl.ptr->kernel_txi = (shm->ctl.ptr->kernel_txi + 1) % SHM_TX_RING_SIZE;
}


/*
 *----------------------------------------------------------------------------
 * user_rx_idx --
 *    User's current shared memory RX ring index
 *----------------------------------------------------------------------------
 */

static inline uint16
user_rx_idx(const struct vmxnet3_shm_pool *shm)
{
   return shm->ctl.ptr->user_rxi;
}


/*
 *----------------------------------------------------------------------------
 * kernel_rx_entry --
 *    Kernel's current shared memory RX ring entry
 *----------------------------------------------------------------------------
 */

static inline struct vmxnet3_shm_ringentry *
kernel_rx_entry(const struct vmxnet3_shm_pool *shm)
{
   return &shm->ctl.ptr->rx_ring[kernel_rx_idx(shm)];
}


/*
 *----------------------------------------------------------------------------
 * kernel_tx_entry --
 *    Kernel's current shared memory TX ring entry
 *----------------------------------------------------------------------------
 */

static inline struct vmxnet3_shm_ringentry *
kernel_tx_entry(const struct vmxnet3_shm_pool *shm)
{
   return &shm->ctl.ptr->tx_ring[kernel_tx_idx(shm)];
}


/*
 *----------------------------------------------------------------------------
 * user_rx_entry --
 *    User's current shared memory RX ring entry.
 *    Used by vmxnet3_shm_chardev_poll.
 *----------------------------------------------------------------------------
 */

static inline struct vmxnet3_shm_ringentry *
user_rx_entry(const struct vmxnet3_shm_pool *shm)
{
   return &shm->ctl.ptr->rx_ring[user_rx_idx(shm)];
}
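
/*
 * A note on the ring convention used by the helpers above, as far as it can
 * be inferred from this file: each ring entry carries an "own" bit, and each
 * side (kernel and user space) keeps its own running index into the ring.
 * The producer fills an entry, sets own, and advances its index; the consumer
 * only touches entries whose own bit is set. The modulo arithmetic in the
 * inc_* helpers makes the indices wrap, so SHM_RX_RING_SIZE and
 * SHM_TX_RING_SIZE bound both rings.
 */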

static void
vmxnet3_shm_pool_release(struct kobject *kobj);

static const struct kobj_type vmxnet3_shm_pool_type = {
   .release = vmxnet3_shm_pool_release
};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
static int
vmxnet3_shm_chardev_fault(struct vm_area_struct *vma,
                          struct vm_fault *vmf);

static struct vm_operations_struct vmxnet3_shm_vm_ops = {
   .fault = vmxnet3_shm_chardev_fault,
};
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)
static struct page *
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
                           unsigned long address,
                           int *type);

static struct vm_operations_struct vmxnet3_shm_vm_ops = {
   .nopage = vmxnet3_shm_chardev_nopage,
};
#else
static struct page *
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
                           unsigned long address,
                           int unused); /* pre-2.6.1 nopage signature (assumed) */

static struct vm_operations_struct vmxnet3_shm_vm_ops = {
   .nopage = vmxnet3_shm_chardev_nopage,
};
#endif

/* file operations */
static int vmxnet3_shm_chardev_mmap(struct file *filp,
                                    struct vm_area_struct *vma);
static int vmxnet3_shm_chardev_open(struct inode *inode,
                                    struct file *filp);
static int vmxnet3_shm_chardev_release(struct inode *inode,
                                       struct file *filp);
static unsigned int vmxnet3_shm_chardev_poll(struct file *filp,
                                             poll_table *wait);
static long vmxnet3_shm_chardev_ioctl(struct file *filp,
                                      unsigned int cmd,
                                      unsigned long arg);
#ifndef HAVE_UNLOCKED_IOCTL
static int vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
                                         struct file *filp,
                                         unsigned int cmd,
                                         unsigned long arg);
#endif

static struct file_operations shm_fops = {
   .owner = THIS_MODULE,
   .mmap = vmxnet3_shm_chardev_mmap,
   .open = vmxnet3_shm_chardev_open,
   .release = vmxnet3_shm_chardev_release,
   .poll = vmxnet3_shm_chardev_poll,
#ifdef HAVE_UNLOCKED_IOCTL
   .unlocked_ioctl = vmxnet3_shm_chardev_ioctl,
   .compat_ioctl = vmxnet3_shm_chardev_ioctl,
#else
   .ioctl = vmxnet3_shm_chardev_old_ioctl,
#endif
};

static LIST_HEAD(vmxnet3_shm_list);
static spinlock_t vmxnet3_shm_list_lock = SPIN_LOCK_UNLOCKED;

/* vmxnet3_shm_pool kobject */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#define compat_kobject_init(kobj, ktype) { kobject_init(kobj, (struct kobj_type *) ktype); }
#else
#define compat_kobject_init(kobj, _ktype) { \
   (kobj)->ktype = (struct kobj_type *) _ktype; \
   kobject_init(kobj); \
}
#endif

/*
 *----------------------------------------------------------------------------
 * vmxnet3_shm_init_allocator --
 *    Zero all shared memory data pages and fill the allocator with them.
 *----------------------------------------------------------------------------
 */

static void
vmxnet3_shm_init_allocator(struct vmxnet3_shm_pool *shm)
{
   int i;

   shm->allocator.count = 0;
   for (i = 1; i < shm->data.num_pages; i++) {
      struct page *page = VMXNET3_SHM_IDX2PAGE(shm, i);
      void *virt = kmap(page);
      memset(virt, 0, PAGE_SIZE);
      kunmap(page);

      shm->allocator.stack[shm->allocator.count++] = i;
      BUG_ON(i == SHM_INVALID_IDX);
   }
   BUG_ON(shm->allocator.count > shm->data.num_pages);
}
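
/*
 * Note that the loop above starts at index 1: page index 0 is never handed
 * out, which is consistent with the BUG_ON(i == SHM_INVALID_IDX) check and
 * suggests that index 0 doubles as SHM_INVALID_IDX.
 */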

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_pool_reset --
 *    Clean up after userspace has closed the device
 *-----------------------------------------------------------------------------
 */

static void
vmxnet3_shm_pool_reset(struct vmxnet3_shm_pool *shm)
{
   int err = 0;

   printk(KERN_INFO "resetting shm pool\n");

   /*
    * Reset_work may be in the middle of resetting the device, wait for its
    * completion.
    */
   while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &shm->adapter->state))
      msleep(1); /* assumption: brief sleep while waiting for the bit */

   if (compat_netif_running(shm->adapter->netdev))
      vmxnet3_quiesce_dev(shm->adapter);

   vmxnet3_shm_init_allocator(shm);

   if (compat_netif_running(shm->adapter->netdev))
      err = vmxnet3_activate_dev(shm->adapter);

   memset(shm->ctl.ptr, 0, PAGE_SIZE);

   clear_bit(VMXNET3_STATE_BIT_RESETTING, &shm->adapter->state);

   if (err)
      vmxnet3_force_close(shm->adapter);
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_pool_create --
 *    Allocate and initialize shared memory pool. Allocates the data and
 *    control pages, resets them to zero, initializes locks, registers the
 *    character device, etc. Creates virtual address mappings for the pool,
 *    but does not set up DMA yet.
 *
 * Results:
 *    The new shared memory pool object, or NULL on failure.
 *-----------------------------------------------------------------------------
 */

struct vmxnet3_shm_pool *
vmxnet3_shm_pool_create(struct vmxnet3_adapter *adapter,
                        char *name, int pool_size)
{
   int i;
   unsigned long flags;
   struct vmxnet3_shm_pool *shm;
   struct vmxnet3_shm_ctl *ctl_ptr;
   struct page *ctl_page;
   int shm_size = sizeof(*shm) +
      pool_size * sizeof(uint16) +
      pool_size * sizeof(struct vmxnet3_shm_mapped_page);

   /* Allocate shm_pool kobject */
   shm = vmalloc(shm_size);
   if (shm == NULL)
      return NULL;

   memset(shm, 0, sizeof(*shm));
   compat_kobject_init(&shm->kobj, &vmxnet3_shm_pool_type);
   snprintf(shm->name, sizeof(shm->name), "vmxnet_%s_shm", name);
   kobject_set_name(&shm->kobj, shm->name);
   shm->adapter = adapter;

   /* Allocate data pages */
   shm->data.num_pages = pool_size;
   for (i = 1; i < shm->data.num_pages; i++) {
      struct page *page = alloc_page(GFP_KERNEL);
      if (page == NULL)
         goto fail_data;

      VMXNET3_SHM_SET_IDX2PAGE(shm, i, page);
      BUG_ON(i == SHM_INVALID_IDX);
   }

   /* Allocate control page */
   ctl_page = alloc_page(GFP_KERNEL);
   if (ctl_page == NULL)
      goto fail_data;
   ctl_ptr = (void *)kmap(ctl_page);
   shm->ctl.pages[0] = ctl_page;
   shm->ctl.ptr = ctl_ptr;

   /* Reset data and control pages */
   vmxnet3_shm_init_allocator(shm);
   memset(shm->ctl.ptr, 0, PAGE_SIZE);

   /* Register char device */
   shm->misc_dev.minor = MISC_DYNAMIC_MINOR;
   shm->misc_dev.name = shm->name;
   shm->misc_dev.fops = &shm_fops;
   if (misc_register(&shm->misc_dev)) {
      printk(KERN_ERR "failed to register vmxnet3_shm character device\n");
      goto fail_ctl;
   }

   /* Initialize locks */
   spin_lock_init(&shm->alloc_lock);
   spin_lock_init(&shm->tx_lock);
   spin_lock_init(&shm->rx_lock);
   init_waitqueue_head(&shm->rxq);

   spin_lock_irqsave(&vmxnet3_shm_list_lock, flags);
   list_add(&shm->list, &vmxnet3_shm_list);
   spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);

   printk(KERN_INFO "created vmxnet shared memory pool %s\n", shm->name);

   return shm;

   /* Error unwind; label names and flow are inferred from the cleanup code. */
fail_ctl:
   kunmap(ctl_page);
   __free_page(ctl_page);

fail_data:
   for (i = 0; i < shm->data.num_pages; i++) {
      if (VMXNET3_SHM_IDX2PAGE(shm, i) != NULL)
         __free_page(VMXNET3_SHM_IDX2PAGE(shm, i));
   }
   vfree(shm);
   return NULL;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_pool_release --
 *    Release a shared memory pool.
 *-----------------------------------------------------------------------------
 */

static void
vmxnet3_shm_pool_release(struct kobject *kobj)
{
   int i;
   unsigned long flags;
   struct vmxnet3_shm_pool *shm = container_of(kobj, struct vmxnet3_shm_pool, kobj);

   spin_lock_irqsave(&vmxnet3_shm_list_lock, flags);
   list_del(&shm->list);
   spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);

   misc_deregister(&shm->misc_dev);

   /* Free control pages */
   for (i = 0; i < SHM_CTL_SIZE; i++) {
      kunmap(shm->ctl.pages[i]);
      __free_page(shm->ctl.pages[i]);
   }

   /* Free data pages */
   for (i = 1; i < shm->data.num_pages; i++)
      __free_page(VMXNET3_SHM_IDX2PAGE(shm, i));

   printk(KERN_INFO "destroyed vmxnet shared memory pool %s\n", shm->name);

   vfree(shm);
}

/* Shared memory pool management */

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_alloc_page --
 *    Allocate a page from the shared memory area.
 *
 * Results:
 *    Index to page or SHM_INVALID_IDX on failure.
 *-----------------------------------------------------------------------------
 */

static uint16
vmxnet3_shm_alloc_page(struct vmxnet3_shm_pool *shm)
{
   uint16 idx;
   unsigned long flags;

   spin_lock_irqsave(&shm->alloc_lock, flags);
   if (shm->allocator.count == 0) {
      idx = SHM_INVALID_IDX;
   } else {
      idx = shm->allocator.stack[--shm->allocator.count];
      BUG_ON(idx == SHM_INVALID_IDX);
   }
   spin_unlock_irqrestore(&shm->alloc_lock, flags);

   return idx;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_free_page --
 *    Free a page back to the shared memory area
 *-----------------------------------------------------------------------------
 */

static void
vmxnet3_shm_free_page(struct vmxnet3_shm_pool *shm,
                      uint16 idx)
{
   unsigned long flags;

   spin_lock_irqsave(&shm->alloc_lock, flags);
   BUG_ON(shm->allocator.count >= shm->data.num_pages);
   shm->allocator.stack[shm->allocator.count++] = idx;
   spin_unlock_irqrestore(&shm->alloc_lock, flags);
}
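
/*
 * The allocator is a plain LIFO stack of free page indices, so both
 * vmxnet3_shm_alloc_page and vmxnet3_shm_free_page are O(1) under
 * alloc_lock. A typical in-kernel caller (sketch only) pairs them like:
 *
 *    uint16 idx = vmxnet3_shm_alloc_page(shm);
 *    if (idx != SHM_INVALID_IDX) {
 *       ... hand the page to a ring entry ...
 *       vmxnet3_shm_free_page(shm, idx);   // only on the error path
 *    }
 */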

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_addr2idx --
 *    Convert user space address into index into the shared memory pool.
 *-----------------------------------------------------------------------------
 */

static inline unsigned long
vmxnet3_shm_addr2idx(struct vm_area_struct *vma,
                     unsigned long address)
{
   return vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
}
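
/*
 * Worked example: for a mapping created with vm_pgoff == 0, a fault at
 * vm_start + 3 * PAGE_SIZE yields index 3. The fault handlers below then
 * interpret the index against the SHM_DATA_START/SHM_CTL_START layout, so
 * data pages and control pages live at distinct page offsets within the
 * same mapping.
 */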

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_fault --
 *    mmap fault handler. Called if the user space requests a page for
 *    which there is no shared memory mapping yet. We need to look up
 *    the page we want to back the shared memory mapping with.
 *
 * Results:
 *    The page backing the user space address.
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_chardev_fault(struct vm_area_struct *vma,
                          struct vm_fault *vmf)
{
   struct vmxnet3_shm_pool *shm = vma->vm_private_data;
   unsigned long address = (unsigned long)vmf->virtual_address;
   unsigned long idx = vmxnet3_shm_addr2idx(vma, address);
   struct page *pageptr;

   if (idx >= SHM_DATA_START && idx < SHM_DATA_START + shm->data.num_pages)
      pageptr = VMXNET3_SHM_IDX2PAGE(shm, idx - SHM_DATA_START);
   else if (idx >= SHM_CTL_START && idx < SHM_CTL_START + SHM_CTL_SIZE)
      pageptr = shm->ctl.pages[idx - SHM_CTL_START];
   else
      pageptr = NULL;

   /* Take a reference before handing the page to the fault path. */
   if (pageptr)
      get_page(pageptr);
   vmf->page = pageptr;

   return pageptr ? VM_FAULT_MINOR : VM_FAULT_ERROR;
}

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_nopage --
 *    mmap nopage handler. Called if the user space requests a page for
 *    which there is no shared memory mapping yet. We need to look up
 *    the page we want to back the shared memory mapping with.
 *
 * Results:
 *    The page backing the user space address.
 *-----------------------------------------------------------------------------
 */

static struct page *
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
                           unsigned long address,
                           int *type)
{
   struct vmxnet3_shm_pool *shm = vma->vm_private_data;
   unsigned long idx = vmxnet3_shm_addr2idx(vma, address);
   struct page *pageptr;

   if (idx >= SHM_DATA_START && idx < SHM_DATA_START + shm->data.num_pages)
      pageptr = VMXNET3_SHM_IDX2PAGE(shm, idx - SHM_DATA_START);
   else if (idx >= SHM_CTL_START && idx < SHM_CTL_START + SHM_CTL_SIZE)
      pageptr = shm->ctl.pages[idx - SHM_CTL_START];
   else
      pageptr = NULL;

   if (pageptr)
      get_page(pageptr);

   *type = pageptr ? VM_FAULT_MINOR : VM_FAULT_SIGBUS;

   return pageptr;
}

#else

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_nopage --
 *    mmap nopage handler. Called if the user space requests a page for
 *    which there is no shared memory mapping yet. We need to look up
 *    the page we want to back the shared memory mapping with.
 *
 * Results:
 *    The page backing the user space address.
 *-----------------------------------------------------------------------------
 */

static struct page *
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
                           unsigned long address,
                           int unused)
{
   struct vmxnet3_shm_pool *shm = vma->vm_private_data;
   unsigned long idx = vmxnet3_shm_addr2idx(vma, address);
   struct page *pageptr;

   if (idx >= SHM_DATA_START && idx < SHM_DATA_START + shm->data.num_pages)
      pageptr = VMXNET3_SHM_IDX2PAGE(shm, idx - SHM_DATA_START);
   else if (idx >= SHM_CTL_START && idx < SHM_CTL_START + SHM_CTL_SIZE)
      pageptr = shm->ctl.pages[idx - SHM_CTL_START];
   else
      pageptr = NULL;

   if (pageptr)
      get_page(pageptr);

   return pageptr;
}

#endif

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_mmap --
 *    Set up a user-space mapping of the shared memory pool; the pages
 *    themselves are faulted in on demand by the handlers above.
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_chardev_mmap(struct file *filp,
                         struct vm_area_struct *vma)
{
   vma->vm_private_data = filp->private_data;
   vma->vm_ops = &vmxnet3_shm_vm_ops;
   vma->vm_flags |= VM_RESERVED;
   return 0;
}
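
/*
 * For illustration only (not part of the driver): a user-space client is
 * expected to mmap the character device and let the fault handlers above
 * populate the mapping on demand. A minimal sketch, assuming the misc
 * device node shows up as /dev/vmxnet_eth0_shm and a hypothetical
 * TOTAL_PAGES constant covering the data and control regions:
 *
 *    int fd = open("/dev/vmxnet_eth0_shm", O_RDWR);
 *    void *base = mmap(NULL, TOTAL_PAGES * getpagesize(),
 *                      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * No pages are wired up at mmap time; each access faults in either a data
 * page or a control page depending on the page offset.
 */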

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_poll --
 *    Poll called from user space. We consume the TX queue and then go to
 *    sleep until we get woken up by an interrupt.
 *-----------------------------------------------------------------------------
 */

static unsigned int
vmxnet3_shm_chardev_poll(struct file *filp,
                         poll_table *wait)
{
   struct vmxnet3_shm_pool *shm = filp->private_data;
   unsigned int mask = 0;
   unsigned long flags;
   struct vmxnet3_shm_ringentry *re;

   /* consume TX queue */
   if (vmxnet3_shm_consume_user_tx_queue(shm) == -1) {
      /*
       * The device has been closed, let the user space
       * know there is activity, so that it gets a chance
       * to read the channelBad flag.
       */
      return POLLIN | POLLRDNORM;
   }

   /* Wait on the rxq for an interrupt to wake us */
   poll_wait(filp, &shm->rxq, wait);

   /* Check if the user's current RX entry is full */
   spin_lock_irqsave(&shm->rx_lock, flags);
   re = user_rx_entry(shm);

   /*
    * An RX entry whose own bit is set has been filled by the kernel and
    * not yet consumed by user space, so the device is readable.
    */
   if (re->own)
      mask |= POLLIN | POLLRDNORM;
   spin_unlock_irqrestore(&shm->rx_lock, flags);

   return mask;
}
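
/*
 * For illustration only: the poll semantics above give user space a simple
 * event loop. A sketch, assuming the mapped control page is visible as ctl:
 *
 *    struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *    while (poll(&pfd, 1, -1) > 0) {
 *       if (ctl->channelBad)
 *          break;                 // device was closed under us
 *       ... consume rx_ring entries whose own bit is set ...
 *    }
 *
 * Note that a poll() call also kicks the kernel into draining the user TX
 * ring, so user space can batch transmits and then poll.
 */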

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_ioctl --
 *    Handle ioctls from user space.
 *
 * Results:
 *    Return code depends on ioctl.
 *-----------------------------------------------------------------------------
 */

static long
vmxnet3_shm_chardev_ioctl(struct file *filp,
                          unsigned int cmd,
                          unsigned long arg)
{
   struct vmxnet3_shm_pool *shm = filp->private_data;
   uint16 idx, idx1;
   int i;

   switch (cmd) {
   case SHM_IOCTL_TX: /* assumed command name; kicks the kernel TX path */
      vmxnet3_shm_consume_user_tx_queue(shm);
      return 0;

   case SHM_IOCTL_ALLOC_ONE:
      idx = vmxnet3_shm_alloc_page(shm);
      return idx;

   case SHM_IOCTL_ALLOC_MANY:
      /* Hand arg pages to user space through the RX ring. */
      for (i = 0; i < arg; i++) {
         idx = vmxnet3_shm_alloc_page(shm);
         if (idx != SHM_INVALID_IDX) {
            if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
               vmxnet3_shm_free_page(shm, idx);
               return SHM_INVALID_IDX;
            }
         } else {
            return SHM_INVALID_IDX;
         }
      }
      return 0;

   case SHM_IOCTL_ALLOC_ONE_AND_MANY:
      idx1 = vmxnet3_shm_alloc_page(shm);
      if (idx1 == SHM_INVALID_IDX)
         return SHM_INVALID_IDX;

      for (i = 0; i < arg - 1; i++) {
         idx = vmxnet3_shm_alloc_page(shm);
         if (idx != SHM_INVALID_IDX) {
            if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
               vmxnet3_shm_free_page(shm, idx);
               vmxnet3_shm_free_page(shm, idx1);
               return SHM_INVALID_IDX;
            }
         } else {
            vmxnet3_shm_free_page(shm, idx1);
            return SHM_INVALID_IDX;
         }
      }
      return idx1;

   case SHM_IOCTL_FREE_ONE:
      if (arg != SHM_INVALID_IDX && arg < shm->data.num_pages)
         vmxnet3_shm_free_page(shm, arg);
      return 0;
   }

   return -ENOTTY;
}


#ifndef HAVE_UNLOCKED_IOCTL
static int vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
                                         struct file *filp,
                                         unsigned int cmd,
                                         unsigned long arg)
{
   return vmxnet3_shm_chardev_ioctl(filp, cmd, arg);
}
#endif
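
/*
 * For illustration only: how the allocation ioctls above are meant to be
 * driven from user space (command names as used in this file, presumably
 * exported through a shared header; arg is the page count):
 *
 *    long idx = ioctl(fd, SHM_IOCTL_ALLOC_ONE, 0);    // one index returned
 *    long ret = ioctl(fd, SHM_IOCTL_ALLOC_MANY, 32);  // 32 pages via RX ring
 *
 * SHM_IOCTL_ALLOC_MANY does not return indices directly; the pages arrive
 * on the RX ring as len-0 "trash" entries that user space harvests.
 */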

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_find_by_minor --
 *    Find the right shared memory pool based on the minor number of the
 *    character device being opened.
 *
 * Results:
 *    Pointer to the shared memory pool, or NULL on error.
 *
 * Side effects:
 *    Takes a reference on the kobj of the shm object.
 *-----------------------------------------------------------------------------
 */

static struct vmxnet3_shm_pool *
vmxnet3_shm_chardev_find_by_minor(unsigned int minor)
{
   struct vmxnet3_shm_pool *shm, *tmp;
   unsigned long flags;

   spin_lock_irqsave(&vmxnet3_shm_list_lock, flags);
   list_for_each_entry_safe(shm, tmp, &vmxnet3_shm_list, list) {
      if (shm->misc_dev.minor == minor && kobject_get(&shm->kobj)) {
         spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);
         return shm;
      }
   }
   spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);

   return NULL;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_open --
 *    Find the right shared memory pool based on the minor number of the
 *    character device and stash it in the file.
 *
 * Results:
 *    0 on success or -ENODEV if no device exists with the given minor number
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_chardev_open(struct inode *inode,
                         struct file *filp)
{
   /* Stash pointer to shm in file so file ops can use it */
   filp->private_data = vmxnet3_shm_chardev_find_by_minor(iminor(inode));
   if (filp->private_data == NULL)
      return -ENODEV;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
   /*
    * Left disabled: this would mark the mapping as backed by a directly
    * mappable character device.
    */
   /* filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi; */
#endif

   return 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_chardev_release --
 *    Closing the char device. Release the ref count on the shared memory
 *    pool, perform cleanup.
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_chardev_release(struct inode *inode,
                            struct file *filp)
{
   struct vmxnet3_shm_pool *shm = filp->private_data;

   /* Assumed structure: a full reset needs a live adapter. */
   if (shm->adapter) {
      vmxnet3_shm_pool_reset(shm);
   } else {
      vmxnet3_shm_init_allocator(shm);
      memset(shm->ctl.ptr, 0, PAGE_SIZE);
   }

   kobject_put(&shm->kobj);

   return 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_free_skbpages --
 *    Free the shared memory pages (secretly) backing this skb.
 *-----------------------------------------------------------------------------
 */

void
vmxnet3_free_skbpages(struct vmxnet3_adapter *adapter,
                      struct sk_buff *skb)
{
   int i;

   vmxnet3_shm_free_page(adapter->shm, VMXNET3_SHM_SKB_GETIDX(skb));
   for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
      struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

      /* In shm mode frag->page holds a pool index, not a real page pointer. */
      vmxnet3_shm_free_page(adapter->shm, (unsigned long)frag->page);
   }

   skb_shinfo(skb)->nr_frags = 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_start_tx --
 *    The shared memory vmxnet version of the hard_start_xmit routine.
 *    Just frees the given packet as we do not intend to transmit any
 *    packet given to us by the TCP/IP stack.
 *
 * Results:
 *    Always 0 for success.
 *-----------------------------------------------------------------------------
 */

int
vmxnet3_shm_start_tx(struct sk_buff *skb,
                     struct net_device *dev)
{
   compat_dev_kfree_skb_irq(skb, FREE_WRITE);
   return COMPAT_NETDEV_TX_OK;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_tx_pkt --
 *    Send a packet (collection of ring entries) using the h/w tx routine.
 *    Protected by shm.tx_lock.
 *
 * Results:
 *    0 on success. Negative value to indicate error
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_tx_pkt(struct vmxnet3_adapter *adapter,
                   struct vmxnet3_shm_ringentry *res,
                   int frags)
{
   struct sk_buff *skb;
   int i;
   int ret;

   /* A small dummy skb; the payload stays in the shared memory pages. */
   skb = dev_alloc_skb(100);
   if (skb == NULL) {
      for (i = 0; i < frags; i++)
         vmxnet3_shm_free_page(adapter->shm, res[i].idx);
      return -ENOMEM;
   }

   VMXNET3_SHM_SKB_SETIDX(skb, res[0].idx);
   VMXNET3_SHM_SKB_SETLEN(skb, res[0].len);

   for (i = 1; i < frags; i++) {
      struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
                                     skb_shinfo(skb)->nr_frags;

      BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

      frag->page = (struct page *)(unsigned long)res[i].idx;
      frag->page_offset = 0;
      frag->size = res[i].len;
      skb_shinfo(skb)->nr_frags++;
   }

   {
      struct vmxnet3_tx_queue *tq = &adapter->tx_queue;

      skb->protocol = htons(ETH_P_IPV6);
      adapter->shm->ctl.ptr->stats.kernel_tx += frags; /* XXX: move to better place */
      ret = vmxnet3_shm_tq_xmit(skb, tq, adapter, adapter->netdev);
      if (ret == COMPAT_NETDEV_TX_BUSY)
         vmxnet3_dev_kfree_skb(adapter, skb);
   }

   return ret;
}
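
/*
 * A convention worth calling out: in shm mode the skb fragment descriptors
 * do not hold real struct page pointers. The cast above stores a shared
 * memory page *index* in frag->page, and vmxnet3_free_skbpages performs the
 * inverse cast when returning the pages to the pool. Such an skb must never
 * reach code that dereferences frag->page, which is presumably why these
 * skbs are only ever fed to the device's own TX path.
 */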

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_tq_xmit --
 *    Wrap vmxnet3_tq_xmit holding the netdev tx lock to better emulate the
 *    Linux stack. Also check for a stopped tx queue to avoid racing with
 *    the paths that stop the queue.
 *
 * Results:
 *    Same as vmxnet3_tq_xmit.
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_tq_xmit(struct sk_buff *skb,
                    struct vmxnet3_tx_queue *tq,
                    struct vmxnet3_adapter *adapter,
                    struct net_device *netdev)
{
   int ret = COMPAT_NETDEV_TX_BUSY;

   compat_netif_tx_lock(netdev);
   if (!netif_queue_stopped(netdev))
      ret = vmxnet3_tq_xmit(skb, tq, adapter, netdev);
   compat_netif_tx_unlock(netdev);

   return ret;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_tx_re --
 *    Add one entry to the partial TX array. If re->eop is set, i.e. if
 *    the packet is complete, TX the partial packet.
 *-----------------------------------------------------------------------------
 */

static void
vmxnet3_shm_tx_re(struct vmxnet3_shm_pool *shm,
                  struct vmxnet3_shm_ringentry re)
{
   int i;

   BUG_ON(shm->partial_tx.frags > VMXNET3_SHM_MAX_FRAGS);
   if (shm->partial_tx.frags == VMXNET3_SHM_MAX_FRAGS) {
      /* The packet overflows the fragment array, so drop all of it. */
      printk(KERN_INFO "dropped oversize shm packet\n");
      for (i = 0; i < shm->partial_tx.frags; i++)
         vmxnet3_shm_free_page(shm, shm->partial_tx.res[i].idx);

      shm->partial_tx.frags = 0;
      vmxnet3_shm_free_page(shm, re.idx);
      return;
   }

   shm->partial_tx.res[shm->partial_tx.frags++] = re;

   if (re.eop) {
      int status = vmxnet3_shm_tx_pkt(shm->adapter,
                                      shm->partial_tx.res,
                                      shm->partial_tx.frags);
      if (status < 0)
         printk(KERN_DEBUG "vmxnet3_shm_tx_pkt failed %d\n", status);

      shm->partial_tx.frags = 0;
   }
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_consume_user_tx_queue --
 *    Consume all packets in the user TX queue and send full
 *    packets to the device
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_consume_user_tx_queue(struct vmxnet3_shm_pool *shm)
{
   unsigned long flags;
   struct vmxnet3_shm_ringentry *re;

   spin_lock_irqsave(&shm->tx_lock, flags);

   /* Check if the device has been closed */
   if (shm->adapter == NULL) {
      spin_unlock_irqrestore(&shm->tx_lock, flags);
      return -1;
   }

   /*
    * Loop through each full entry in the user TX ring. Discard trash frags and
    * add the others to the partial TX array. If an entry has eop set, TX the
    * whole packet.
    */
   while ((re = kernel_tx_entry(shm))->own) {
      if (re->trash) {
         vmxnet3_shm_free_page(shm, re->idx);
      } else {
         shm->ctl.ptr->stats.kernel_tx++;
         vmxnet3_shm_tx_re(shm, *re);
      }
      inc_kernel_tx_idx(shm);
      re->own = 0; /* assumed: hand the consumed entry back to user space */
   }

   spin_unlock_irqrestore(&shm->tx_lock, flags);

   return 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_user_desc_available --
 *    Checks if we have num_entries ring entries available on the rx ring.
 *
 * Results:
 *    0 if they are available,
 *    -ENOMEM for not enough entries available
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_user_desc_available(struct vmxnet3_shm_pool *shm,
                                int num_entries)
{
   struct vmxnet3_shm_ringentry *re;
   u16 reIdx = kernel_rx_idx(shm);

   while (num_entries > 0) {
      re = &shm->ctl.ptr->rx_ring[reIdx];
      if (re->own)
         return -ENOMEM;

      reIdx = (reIdx + 1) % SHM_RX_RING_SIZE;
      num_entries--;
   }

   return 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_rx_skb --
 *    Receives an skb into the rx ring. If we can't receive all fragments,
 *    the entire skb is dropped.
 *
 * Results:
 *    0 on success,
 *    -ENOMEM for not enough entries available
 *-----------------------------------------------------------------------------
 */

int
vmxnet3_shm_rx_skb(struct vmxnet3_adapter *adapter,
                   struct sk_buff *skb)
{
   int ret;
   int i;
   int num_entries = 1 + skb_shinfo(skb)->nr_frags;
   int eop = (num_entries == 1);

   if (vmxnet3_shm_user_desc_available(adapter->shm, num_entries) == -ENOMEM) {
      vmxnet3_dev_kfree_skb_irq(adapter, skb);
      return -ENOMEM;
   }

   ret = vmxnet3_shm_user_rx(adapter->shm,
                             VMXNET3_SHM_SKB_GETIDX(skb),
                             VMXNET3_SHM_SKB_GETLEN(skb),
                             0, eop);
   if (ret)
      printk(KERN_ERR "vmxnet3_shm_user_rx failed on frag 0\n");

   for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
      struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
      unsigned long shm_idx = (unsigned long)frag->page;

      eop = (i == skb_shinfo(skb)->nr_frags - 1);

      ret = vmxnet3_shm_user_rx(adapter->shm,
                                shm_idx, frag->size,
                                0, eop);
      if (ret)
         printk(KERN_ERR "vmxnet3_shm_user_rx failed on frag 1+\n");
   }

   /*
    * Do NOT use the vmxnet3 version of kfree_skb, as we handed
    * ownership of shm pages to the user space, thus we must not
    * free the pages backing this skb.
    */
   skb_shinfo(skb)->nr_frags = 0;
   compat_dev_kfree_skb_irq(skb, FREE_WRITE);

   return 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_user_rx --
 *    Put one packet fragment into the shared memory RX ring.
 *
 * Results:
 *    0 on success.
 *    Negative value on error.
 *-----------------------------------------------------------------------------
 */

int
vmxnet3_shm_user_rx(struct vmxnet3_shm_pool *shm,
                    uint16 idx, uint16 len,
                    int trash, int eop)
{
   struct vmxnet3_shm_ringentry *re = kernel_rx_entry(shm);
   shm->ctl.ptr->stats.kernel_rx++;

   if (re->own)
      return -ENOMEM;

   re->idx = idx;
   re->len = len;
   re->trash = trash;
   re->eop = eop;

   inc_kernel_rx_idx(shm);

   /* Publish the entry last so user space never sees it half-filled. */
   re->own = 1;

   return 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_open --
 *    Called when the vmxnet3 device is opened. Allocates the per-device
 *    shared memory pool.
 *
 * Results:
 *    0 on success.
 *    Negative value on error.
 *-----------------------------------------------------------------------------
 */

int
vmxnet3_shm_open(struct vmxnet3_adapter *adapter,
                 char *name, int pool_size)
{
   if (pool_size > SHM_MAX_DATA_SIZE) {
      printk(KERN_ERR "vmxnet3_shm: requested pool size %d is larger than the maximum %d\n",
             pool_size, SHM_MAX_DATA_SIZE);
      return -EINVAL;
   }

   adapter->shm = vmxnet3_shm_pool_create(adapter, name, pool_size);
   if (adapter->shm == NULL) {
      printk(KERN_ERR "failed to create shared memory pool\n");
      return -ENOMEM;
   }

   return 0;
}

/*
 *-----------------------------------------------------------------------------
 * vmxnet3_shm_close --
 *    Called when the vmxnet3 device is closed. Does not free the per-device
 *    shared memory pool. The character device might still be open. Thus
 *    freeing the shared memory pool is tied to the ref count on
 *    shm->kobj dropping to zero instead.
 *
 * Results:
 *    0 on success.
 *    Negative value on error.
 *-----------------------------------------------------------------------------
 */

int
vmxnet3_shm_close(struct vmxnet3_adapter *adapter)
{
   unsigned long flags;

   /* Can't unset the lp pointer if a TX is in progress */
   spin_lock_irqsave(&adapter->shm->tx_lock, flags);
   adapter->shm->adapter = NULL;
   spin_unlock_irqrestore(&adapter->shm->tx_lock, flags);

   /* Mark the channel as 'in bad state' */
   adapter->shm->ctl.ptr->channelBad = 1;

   kobject_put(&adapter->shm->kobj);

   /* Wake up any poller so user space notices channelBad */
   wake_up(&adapter->shm->rxq);

   return 0;
}