~n-muench/ubuntu/oneiric/open-vm-tools/open-vm-tools.fix-836277


Viewing changes to modules/linux/vmxnet3/vmxnet3_shm.c

  • Committer: Evan Broder
  • Date: 2010-03-21 23:26:53 UTC
  • mfrom: (1.1.9 upstream)
  • Revision ID: broder@mit.edu-20100321232653-5a57r7v7ch4o6byv
Merging shared upstream rev into target branch.

1
 
/*********************************************************
2
 
 * Copyright (C) 2009 VMware, Inc. All rights reserved.
3
 
 *
4
 
 * This program is free software; you can redistribute it and/or modify it
5
 
 * under the terms of the GNU General Public License as published by the
6
 
 * Free Software Foundation version 2 and no later version.
7
 
 *
8
 
 * This program is distributed in the hope that it will be useful, but
9
 
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
10
 
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
11
 
 * for more details.
12
 
 *
13
 
 * You should have received a copy of the GNU General Public License along
14
 
 * with this program; if not, write to the Free Software Foundation, Inc.,
15
 
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
16
 
 *
17
 
 *********************************************************/
18
 
 
19
 
/*
20
 
 * vmxnet3_shm.c --
21
 
 *
22
 
 *    Shared memory infrastructure for VMXNET3 linux driver. Used by the
23
 
 *    VMXNET3 driver to back its rings with memory from a shared memory
24
 
 *    pool that is shared with user space.
25
 
 */
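
/*
 * Rough picture of how the pieces in this file fit together (an informal
 * sketch derived from the code below, not authoritative documentation):
 *
 *   - A pool is a set of data pages plus one control page; the control
 *     page holds the shared RX/TX rings, the kernel/user ring indices,
 *     statistics and the 'channelBad' flag.
 *   - The pool is exported to user space through a misc char device
 *     ("vmxnet_<name>_shm") that supports mmap, poll and a few ioctls.
 *   - Each ring entry carries a data page index, a length, eop/trash
 *     flags and an 'own' bit that hands the entry back and forth between
 *     kernel and user space.
 */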
26
 
#include "driver-config.h"
27
 
 
28
 
#include "vmxnet3_int.h"
29
 
#include "vmnet_def.h"
30
 
#include "vm_device_version.h"
31
 
#include "vmxnet3_shm.h"
32
 
 
33
 
 
34
 
static int
35
 
vmxnet3_shm_consume_user_tx_queue(struct vmxnet3_shm_pool *shm);
36
 
 
37
 
int
38
 
vmxnet3_shm_tq_xmit(struct sk_buff *skb,
39
 
                struct vmxnet3_tx_queue *tq,
40
 
                struct vmxnet3_adapter *adapter,
41
 
                struct net_device *netdev);
42
 
 
43
 
/*
44
 
 *----------------------------------------------------------------------------
45
 
 *
46
 
 * kernel_rx_idx --
47
 
 *
48
 
 * Result:
49
 
 *    Kernel's current shared memory RX ring index
50
 
 *
51
 
 * Side-effects:
52
 
 *    None
53
 
 *
54
 
 *----------------------------------------------------------------------------
55
 
 */
56
 
 
57
 
static inline u16
58
 
kernel_rx_idx(const struct vmxnet3_shm_pool *shm)
59
 
{
60
 
        return shm->ctl.ptr->kernel_rxi;
61
 
}
62
 
 
63
 
 
64
 
/*
65
 
 *----------------------------------------------------------------------------
66
 
 *
67
 
 * inc_kernel_rx_idx --
68
 
 *
69
 
 * Result:
70
 
 *    None
71
 
 *
72
 
 * Side-effects:
73
 
 *    Increment the kernel's shared memory RX ring index
74
 
 *
75
 
 *----------------------------------------------------------------------------
76
 
 */
77
 
 
78
 
static inline void
79
 
inc_kernel_rx_idx(const struct vmxnet3_shm_pool *shm)
80
 
{
81
 
        shm->ctl.ptr->kernel_rxi = (shm->ctl.ptr->kernel_rxi + 1) % SHM_RX_RING_SIZE;
82
 
}
83
 
 
84
 
 
85
 
/*
86
 
 *----------------------------------------------------------------------------
87
 
 *
88
 
 * kernel_tx_idx --
89
 
 *
90
 
 * Result:
91
 
 *    Kernel's current shared memory TX ring index
92
 
 *
93
 
 * Side-effects:
94
 
 *    None
95
 
 *
96
 
 *----------------------------------------------------------------------------
97
 
 */
98
 
 
99
 
static inline u16
100
 
kernel_tx_idx(const struct vmxnet3_shm_pool *shm)
101
 
{
102
 
        return shm->ctl.ptr->kernel_txi;
103
 
}
104
 
 
105
 
 
106
 
/*
107
 
 *----------------------------------------------------------------------------
108
 
 *
109
 
 * inc_kernel_tx_idx --
110
 
 *
111
 
 * Result:
112
 
 *    None
113
 
 *
114
 
 * Side-effects:
115
 
 *    Increment the kernel's shared memory TX ring index
116
 
 *
117
 
 *----------------------------------------------------------------------------
118
 
 */
119
 
 
120
 
static inline void
121
 
inc_kernel_tx_idx(const struct vmxnet3_shm_pool *shm)
122
 
{
123
 
        shm->ctl.ptr->kernel_txi = (shm->ctl.ptr->kernel_txi + 1) % SHM_TX_RING_SIZE;
124
 
}
125
 
 
126
 
 
127
 
/*
128
 
 *----------------------------------------------------------------------------
129
 
 *
130
 
 * user_rx_idx --
131
 
 *
132
 
 * Result:
133
 
 *    User's current shared memory RX ring index
134
 
 *
135
 
 * Side-effects:
136
 
 *    None
137
 
 *
138
 
 *----------------------------------------------------------------------------
139
 
 */
140
 
 
141
 
static inline u16
142
 
user_rx_idx(const struct vmxnet3_shm_pool *shm)
143
 
{
144
 
        return shm->ctl.ptr->user_rxi;
145
 
}
146
 
 
147
 
/*
148
 
 *----------------------------------------------------------------------------
149
 
 *
150
 
 * kernel_rx_entry --
151
 
 *
152
 
 * Result:
153
 
 *    Kernel's current shared memory RX ring entry
154
 
 *
155
 
 * Side-effects:
156
 
 *    None
157
 
 *
158
 
 *----------------------------------------------------------------------------
159
 
 */
160
 
 
161
 
static inline struct vmxnet3_shm_ringentry *
162
 
kernel_rx_entry(const struct vmxnet3_shm_pool *shm)
163
 
{
164
 
        return &shm->ctl.ptr->rx_ring[kernel_rx_idx(shm)];
165
 
}
166
 
 
167
 
/*
168
 
 *----------------------------------------------------------------------------
169
 
 *
170
 
 * kernel_tx_entry --
171
 
 *
172
 
 * Result:
173
 
 *    Kernel's current shared memory TX ring entry
174
 
 *
175
 
 * Side-effects:
176
 
 *    None
177
 
 *
178
 
 *----------------------------------------------------------------------------
179
 
 */
180
 
 
181
 
static inline struct vmxnet3_shm_ringentry *
182
 
kernel_tx_entry(const struct vmxnet3_shm_pool *shm)
183
 
{
184
 
        return &shm->ctl.ptr->tx_ring[kernel_tx_idx(shm)];
185
 
}
186
 
 
187
 
 
188
 
/*
189
 
 *----------------------------------------------------------------------------
190
 
 *
191
 
 * user_rx_entry --
192
 
 *
193
 
 * Used by vmxnet3_shm_chardev_poll
194
 
 *
195
 
 * Result:
196
 
 *    User's current shared memory RX ring entry
197
 
 *
198
 
 * Side-effects:
199
 
 *    None
200
 
 *
201
 
 *----------------------------------------------------------------------------
202
 
 */
203
 
 
204
 
static inline struct vmxnet3_shm_ringentry *
205
 
user_rx_entry(const struct vmxnet3_shm_pool *shm)
206
 
{
207
 
        return &shm->ctl.ptr->rx_ring[user_rx_idx(shm)];
208
 
}
209
 
 
210
 
 
211
 
/* kobject type */
212
 
static void
213
 
vmxnet3_shm_pool_release(struct kobject *kobj);
214
 
 
215
 
static const struct kobj_type vmxnet3_shm_pool_type = {
216
 
        .release = vmxnet3_shm_pool_release
217
 
};
218
 
 
219
 
/* vm operations */
220
 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
221
 
static int
222
 
vmxnet3_shm_chardev_fault(struct vm_area_struct *vma,
223
 
                struct vm_fault *vmf);
224
 
 
225
 
static struct vm_operations_struct vmxnet3_shm_vm_ops = {
226
 
        .fault = vmxnet3_shm_chardev_fault,
227
 
};
228
 
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)
229
 
static struct page *
230
 
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
231
 
                unsigned long address,
232
 
                int *type);
233
 
 
234
 
static struct vm_operations_struct vmxnet3_shm_vm_ops = {
235
 
        .nopage = vmxnet3_shm_chardev_nopage,
236
 
};
237
 
#else
238
 
static struct page *
239
 
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
240
 
                unsigned long address,
241
 
                int unused);
242
 
 
243
 
static struct vm_operations_struct vmxnet3_shm_vm_ops = {
244
 
        .nopage = vmxnet3_shm_chardev_nopage,
245
 
};
246
 
#endif
247
 
 
248
 
/* file operations */
249
 
static int vmxnet3_shm_chardev_mmap(struct file *filp,
250
 
                struct vm_area_struct *vma);
251
 
 
252
 
static int vmxnet3_shm_chardev_open(struct inode * inode,
253
 
                struct file * filp);
254
 
 
255
 
static int vmxnet3_shm_chardev_release(struct inode * inode,
256
 
                struct file * filp);
257
 
 
258
 
static unsigned int vmxnet3_shm_chardev_poll(struct file *filp,
259
 
                poll_table *wait);
260
 
 
261
 
static long vmxnet3_shm_chardev_ioctl(struct file *filp,
262
 
                unsigned int cmd,
263
 
                unsigned long arg);
264
 
 
265
 
#ifndef HAVE_UNLOCKED_IOCTL
266
 
static int vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
267
 
                struct file *filp,
268
 
                unsigned int cmd,
269
 
                unsigned long arg);
270
 
#endif
271
 
 
272
 
static struct file_operations shm_fops = {
273
 
        .owner = THIS_MODULE,
274
 
        .mmap = vmxnet3_shm_chardev_mmap,
275
 
        .open = vmxnet3_shm_chardev_open,
276
 
        .release = vmxnet3_shm_chardev_release,
277
 
        .poll = vmxnet3_shm_chardev_poll,
278
 
#ifdef HAVE_UNLOCKED_IOCTL
279
 
        .unlocked_ioctl = vmxnet3_shm_chardev_ioctl,
280
 
#ifdef CONFIG_COMPAT
281
 
        .compat_ioctl = vmxnet3_shm_chardev_ioctl,
282
 
#endif
283
 
#else
284
 
        .ioctl = vmxnet3_shm_chardev_old_ioctl,
285
 
#endif
286
 
};
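
/*
 * Hypothetical user-space usage sketch (illustration only; the device name
 * depends on the netdev, and SHM_IOCTL_* plus the pool layout come from
 * vmxnet3_shm.h):
 *
 *   int fd = open("/dev/vmxnet_eth0_shm", O_RDWR);
 *   void *pool = mmap(NULL, pool_bytes, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *   poll(&pfd, 1, -1);            // wait for RX entries (chardev_poll)
 *   ioctl(fd, SHM_IOCTL_TX, 0);   // ask the kernel to consume the TX ring
 */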
287
 
 
288
 
static LIST_HEAD(vmxnet3_shm_list);
289
 
static spinlock_t vmxnet3_shm_list_lock = SPIN_LOCK_UNLOCKED;
290
 
 
291
 
/* vmxnet3_shm_pool kobject */
292
 
 
293
 
 
294
 
/* Lifecycle */
295
 
 
296
 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
297
 
#define compat_kobject_init(kobj, ktype) { kobject_init(kobj, (struct kobj_type *) ktype); }
298
 
#else
299
 
#define compat_kobject_init(kobj, _ktype) {  \
300
 
        (kobj)->ktype = (struct kobj_type *) _ktype; \
301
 
        kobject_init(kobj); \
302
 
}
303
 
#endif
304
 
 
305
 
 
306
 
/*
307
 
 *----------------------------------------------------------------------------
308
 
 *
309
 
 * vmxnet3_shm_init_allocator --
310
 
 *
311
 
 * Zero all shared memory data pages and fill the allocator with them.
312
 
 *
313
 
 * Result:
314
 
 *    None
315
 
 *
316
 
 *----------------------------------------------------------------------------
317
 
 */
318
 
 
319
 
static void
320
 
vmxnet3_shm_init_allocator(struct vmxnet3_shm_pool *shm)
321
 
{
322
 
        int i;
323
 
 
324
 
        shm->allocator.count = 0;
325
 
        for (i = 1; i < shm->data.num_pages; i++) {
326
 
                struct page *page = VMXNET3_SHM_IDX2PAGE(shm, i);
327
 
                void *virt = kmap(page);
328
 
                memset(virt, 0, PAGE_SIZE);
329
 
                kunmap(page);
330
 
 
331
 
                shm->allocator.stack[shm->allocator.count++] = i;
332
 
 
333
 
                BUG_ON(i == SHM_INVALID_IDX);
334
 
        }
335
 
        BUG_ON(shm->allocator.count > shm->data.num_pages);
336
 
}
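
/*
 * Note: the allocator is a simple LIFO stack of free data page indices.
 * The loop above starts at 1, so data page index 0 is never handed out.
 */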
337
 
 
338
 
 
339
 
/*
340
 
 *-----------------------------------------------------------------------------
341
 
 *
342
 
 * vmxnet3_shm_pool_reset --
343
 
 *
344
 
 *    Clean up after userspace has closed the device
345
 
 *
346
 
 * Results:
347
 
 *    None.
348
 
 *
349
 
 * Side effects:
350
 
 *    None.
351
 
 *
352
 
 *-----------------------------------------------------------------------------
353
 
 */
354
 
 
355
 
static void
356
 
vmxnet3_shm_pool_reset(struct vmxnet3_shm_pool *shm)
357
 
{
358
 
        int err = 0;
359
 
        printk(KERN_INFO "resetting shm pool\n");
360
 
 
361
 
        /*
362
 
         * Reset_work may be in the middle of resetting the device, wait for its
363
 
         * completion.
364
 
         */
365
 
        while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &shm->adapter->state))
366
 
                compat_msleep(1);
367
 
 
368
 
        if (compat_netif_running(shm->adapter->netdev))
369
 
                vmxnet3_quiesce_dev(shm->adapter);
370
 
 
371
 
        vmxnet3_shm_init_allocator(shm);
372
 
 
373
 
        if (compat_netif_running(shm->adapter->netdev))
374
 
                err = vmxnet3_activate_dev(shm->adapter);
375
 
 
376
 
        memset(shm->ctl.ptr, 0, PAGE_SIZE);
377
 
 
378
 
        clear_bit(VMXNET3_STATE_BIT_RESETTING, &shm->adapter->state);
379
 
 
380
 
        if (err)
381
 
                vmxnet3_force_close(shm->adapter);
382
 
}
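
/*
 * The reset above quiesces the device first so no DMA is in flight while
 * every data page is returned to the allocator and the control page is
 * wiped; the device is then reactivated if the netdev was running.
 */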
383
 
 
384
 
/*
385
 
 *-----------------------------------------------------------------------------
386
 
 *
387
 
 * vmxnet3_shm_pool_create --
388
 
 *
389
 
 *    Allocate and initialize shared memory pool. Allocates the data and
390
 
 *    control pages, resets them to zero, initializes locks, registers the
391
 
 *    character device, etc. Creates virtual address mappings for the pool,
392
 
 *    but does not set up DMA yet.
393
 
 *
394
 
 * Results:
395
 
 *    The new shared memory pool object, or NULL on failure.
396
 
 *
397
 
 * Side effects:
398
 
 *    None.
399
 
 *
400
 
 *-----------------------------------------------------------------------------
401
 
 */
402
 
 
403
 
struct vmxnet3_shm_pool *
404
 
vmxnet3_shm_pool_create(struct vmxnet3_adapter *adapter,
405
 
                char *name, int pool_size)
406
 
{
407
 
        int i;
408
 
        unsigned long flags;
409
 
        struct vmxnet3_shm_pool *shm;
410
 
        struct vmxnet3_shm_ctl *ctl_ptr;
411
 
        struct page *ctl_page;
412
 
        int shm_size = sizeof(*shm) +
413
 
                pool_size * sizeof(uint16) +
414
 
                pool_size * sizeof(struct vmxnet3_shm_mapped_page);
415
 
 
416
 
        /* Allocate shm_pool kobject */
417
 
        shm = vmalloc(shm_size);
418
 
        if (shm == NULL)
419
 
                goto fail_shm;
420
 
 
421
 
        memset(shm, 0, sizeof(*shm));
422
 
        compat_kobject_init(&shm->kobj, &vmxnet3_shm_pool_type);
423
 
        /* shm->kobj.ktype = &vmxnet3_shm_pool_type; */
424
 
        /* kobject_init(&shm->kobj); */
425
 
        snprintf(shm->name, sizeof(shm->name), "vmxnet_%s_shm", name);
426
 
        kobject_set_name(&shm->kobj, shm->name);
427
 
        shm->adapter = adapter;
428
 
 
429
 
        /* Allocate data pages */
430
 
        shm->data.num_pages = pool_size;
431
 
        for (i = 1; i < shm->data.num_pages; i++) {
432
 
                struct page *page = alloc_page(GFP_KERNEL);
433
 
                if (page == NULL)
434
 
                        goto fail_data;
435
 
 
436
 
                VMXNET3_SHM_SET_IDX2PAGE(shm, i, page);
437
 
 
438
 
                BUG_ON(i == SHM_INVALID_IDX);
439
 
        }
440
 
 
441
 
        /* Allocate control page */
442
 
        ctl_page = alloc_page(GFP_KERNEL);
443
 
        if (ctl_page == NULL)
444
 
                goto fail_ctl;
445
 
 
446
 
        ctl_ptr = (void*)kmap(ctl_page);
447
 
        shm->ctl.pages[0] = ctl_page;
448
 
        shm->ctl.ptr = ctl_ptr;
449
 
 
450
 
        /* Reset data and control pages */
451
 
        vmxnet3_shm_init_allocator(shm);
452
 
        memset(shm->ctl.ptr, 0, PAGE_SIZE);
453
 
 
454
 
        /* Register char device */
455
 
        shm->misc_dev.minor = MISC_DYNAMIC_MINOR;
456
 
        shm->misc_dev.name = shm->name;
457
 
        shm->misc_dev.fops = &shm_fops;
458
 
        if (misc_register(&shm->misc_dev)) {
459
 
                printk(KERN_ERR "failed to register vmxnet3_shm character device\n");
460
 
                goto fail_cdev;
461
 
        }
462
 
 
463
 
        /* Initialize locks */
464
 
        spin_lock_init(&shm->alloc_lock);
465
 
        spin_lock_init(&shm->tx_lock);
466
 
        spin_lock_init(&shm->rx_lock);
467
 
        init_waitqueue_head(&shm->rxq);
468
 
 
469
 
        spin_lock_irqsave(&vmxnet3_shm_list_lock, flags);
470
 
        list_add(&shm->list, &vmxnet3_shm_list);
471
 
        spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);
472
 
 
473
 
        printk(KERN_INFO "created vmxnet shared memory pool %s\n", shm->name);
474
 
 
475
 
        return shm;
476
 
 
477
 
fail_cdev:
478
 
        kunmap(ctl_page);
479
 
        __free_page(ctl_page);
480
 
 
481
 
fail_data:
482
 
fail_ctl:
483
 
        for (i = 0; i < shm->data.num_pages; i++) {
484
 
                if (VMXNET3_SHM_IDX2PAGE(shm, i) != NULL)
485
 
                        __free_page(VMXNET3_SHM_IDX2PAGE(shm, i));
486
 
        }
487
 
 
488
 
        vfree(shm); /* allocated with vmalloc() above */
489
 
 
490
 
fail_shm:
491
 
        return NULL;
492
 
}
493
 
 
494
 
 
495
 
/*
496
 
 *-----------------------------------------------------------------------------
497
 
 *
498
 
 * vmxnet3_shm_pool_release --
499
 
 *
500
 
 *    Release a shared memory pool.
501
 
 *
502
 
 * Results:
503
 
 *    None.
504
 
 *
505
 
 * Side effects:
506
 
 *    None.
507
 
 *
508
 
 *-----------------------------------------------------------------------------
509
 
 */
510
 
 
511
 
static void
512
 
vmxnet3_shm_pool_release(struct kobject *kobj)
513
 
{
514
 
        int i;
515
 
        unsigned long flags;
516
 
        struct vmxnet3_shm_pool *shm = container_of(kobj, struct vmxnet3_shm_pool, kobj);
517
 
 
518
 
        spin_lock_irqsave(&vmxnet3_shm_list_lock, flags);
519
 
        list_del(&shm->list);
520
 
        spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);
521
 
 
522
 
        misc_deregister(&shm->misc_dev);
523
 
 
524
 
        /* Free control pages */
525
 
        for (i = 0; i < SHM_CTL_SIZE; i++) {
526
 
                kunmap(shm->ctl.pages[i]);
527
 
                __free_page(shm->ctl.pages[i]);
528
 
        }
529
 
 
530
 
        /* Free data pages */
531
 
        for (i = 1; i < shm->data.num_pages; i++)
532
 
                __free_page(VMXNET3_SHM_IDX2PAGE(shm, i));
533
 
 
534
 
        printk(KERN_INFO "destroyed vmxnet shared memory pool %s\n", shm->name);
535
 
 
536
 
        vfree(shm);
537
 
}
538
 
 
539
 
 
540
 
/* Shared memory pool management */
541
 
 
542
 
/*
543
 
 *-----------------------------------------------------------------------------
544
 
 *
545
 
 * vmxnet3_shm_alloc_page --
546
 
 *
547
 
 *    Allocate a page from the shared memory area.
548
 
 *
549
 
 * Results:
550
 
 *    Index to page or SHM_INVALID_IDX on failure.
551
 
 *
552
 
 * Side effects:
553
 
 *    None.
554
 
 *
555
 
 *-----------------------------------------------------------------------------
556
 
 */
557
 
 
558
 
uint16
559
 
vmxnet3_shm_alloc_page(struct vmxnet3_shm_pool *shm)
560
 
{
561
 
        uint16 idx;
562
 
        unsigned long flags;
563
 
 
564
 
        spin_lock_irqsave(&shm->alloc_lock, flags);
565
 
        if (shm->allocator.count == 0) {
566
 
                idx = SHM_INVALID_IDX;
567
 
        } else {
568
 
                idx = shm->allocator.stack[--shm->allocator.count];
569
 
                BUG_ON(idx == SHM_INVALID_IDX);
570
 
        }
571
 
        spin_unlock_irqrestore(&shm->alloc_lock, flags);
572
 
 
573
 
        return idx;
574
 
}
575
 
 
576
 
 
577
 
/*
578
 
 *-----------------------------------------------------------------------------
579
 
 *
580
 
 * vmxnet3_shm_free_page --
581
 
 *
582
 
 *    Free a page back to the shared memory area
583
 
 *
584
 
 * Results:
585
 
 *    None.
586
 
 *
587
 
 * Side effects:
588
 
 *    None.
589
 
 *
590
 
 *-----------------------------------------------------------------------------
591
 
 */
592
 
 
593
 
void
594
 
vmxnet3_shm_free_page(struct vmxnet3_shm_pool *shm,
595
 
                uint16 idx)
596
 
{
597
 
        unsigned long flags;
598
 
 
599
 
        spin_lock_irqsave(&shm->alloc_lock, flags);
600
 
        BUG_ON(shm->allocator.count >= shm->data.num_pages);
601
 
        shm->allocator.stack[shm->allocator.count++] = idx;
602
 
        spin_unlock_irqrestore(&shm->alloc_lock, flags);
603
 
}
604
 
 
605
 
 
606
 
/* Char device */
607
 
 
608
 
/*
609
 
 *-----------------------------------------------------------------------------
610
 
 *
611
 
 * vmxnet3_shm_addr2idx --
612
 
 *
613
 
 *    Convert a user space address into an index into the shared memory pool.
614
 
 *
615
 
 * Results:
616
 
 *    Page index into the shared memory pool for the given address.
617
 
 *
618
 
 * Side effects:
619
 
 *    None.
620
 
 *
621
 
 *-----------------------------------------------------------------------------
622
 
 */
623
 
 
624
 
static inline unsigned long
625
 
vmxnet3_shm_addr2idx(struct vm_area_struct *vma,
626
 
                unsigned long address)
627
 
{
628
 
        return vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
629
 
}
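
/*
 * Worked example (assuming vm_pgoff == 0): a fault at
 * vma->vm_start + 2 * PAGE_SIZE yields index 2. The fault/nopage handlers
 * below then check whether the index falls in the data page range
 * [SHM_DATA_START, SHM_DATA_START + num_pages) or in the control page
 * range starting at SHM_CTL_START.
 */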
630
 
 
631
 
 
632
 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
633
 
/*
634
 
 *-----------------------------------------------------------------------------
635
 
 *
636
 
 * vmxnet3_shm_chardev_fault --
637
 
 *
638
 
 *    mmap fault handler. Called if the user space requests a page for
639
 
 *    which there is no shared memory mapping yet. We need to look up
640
 
 *    the page we want to back the shared memory mapping with.
641
 
 *
642
 
 * Results:
643
 
 *    The page backing the user space address.
644
 
 *
645
 
 * Side effects:
646
 
 *    None.
647
 
 *
648
 
 *-----------------------------------------------------------------------------
649
 
 */
650
 
 
651
 
static int
652
 
vmxnet3_shm_chardev_fault(struct vm_area_struct *vma,
653
 
                struct vm_fault *vmf)
654
 
{
655
 
        struct vmxnet3_shm_pool *shm = vma->vm_private_data;
656
 
        unsigned long address = (unsigned long)vmf->virtual_address;
657
 
        unsigned long idx = vmxnet3_shm_addr2idx(vma, address);
658
 
        struct page *pageptr;
659
 
 
660
 
        if (idx >= SHM_DATA_START && idx < SHM_DATA_START + shm->data.num_pages)
661
 
                pageptr = VMXNET3_SHM_IDX2PAGE(shm, idx - SHM_DATA_START);
662
 
        else if (idx >= SHM_CTL_START && idx < SHM_CTL_START + SHM_CTL_SIZE)
663
 
                pageptr = shm->ctl.pages[idx - SHM_CTL_START];
664
 
        else
665
 
                pageptr = NULL;
666
 
 
667
 
        if (pageptr)
668
 
                get_page(pageptr);
669
 
 
670
 
        vmf->page = pageptr;
671
 
 
672
 
        return pageptr ? VM_FAULT_MINOR : VM_FAULT_ERROR;
673
 
}
674
 
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)
675
 
 
676
 
/*
677
 
 *-----------------------------------------------------------------------------
678
 
 *
679
 
 * vmxnet3_shm_chardev_nopage --
680
 
 *
681
 
 *    mmap nopage handler. Called if the user space requests a page for
682
 
 *    which there is no shared memory mapping yet. We need to look up
683
 
 *    the page we want to back the shared memory mapping with.
684
 
 *
685
 
 * Results:
686
 
 *    The page backing the user space address.
687
 
 *
688
 
 * Side effects:
689
 
 *    None.
690
 
 *
691
 
 *-----------------------------------------------------------------------------
692
 
 */
693
 
 
694
 
static struct page *
695
 
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
696
 
                unsigned long address,
697
 
                int *type)
698
 
{
699
 
        struct vmxnet3_shm_pool *shm = vma->vm_private_data;
700
 
        unsigned long idx = vmxnet3_shm_addr2idx(vma, address);
701
 
        struct page *pageptr;
702
 
 
703
 
        if (idx >= SHM_DATA_START && idx < SHM_DATA_START + shm->data.num_pages)
704
 
                pageptr = VMXNET3_SHM_IDX2PAGE(shm, idx - SHM_DATA_START);
705
 
        else if (idx >= SHM_CTL_START && idx < SHM_CTL_START + SHM_CTL_SIZE)
706
 
                pageptr = shm->ctl.pages[idx - SHM_CTL_START];
707
 
        else
708
 
                pageptr = NULL;
709
 
 
710
 
        if (pageptr)
711
 
                get_page(pageptr);
712
 
 
713
 
        if (type)
714
 
                *type = pageptr ? VM_FAULT_MINOR : VM_FAULT_SIGBUS;
715
 
 
716
 
        return pageptr;
717
 
}
718
 
 
719
 
#else
720
 
/*
721
 
 *-----------------------------------------------------------------------------
722
 
 *
723
 
 * vmxnet3_shm_chardev_nopage --
724
 
 *
725
 
 *    mmap nopage handler. Called if the user space requests a page for
726
 
 *    which there is no shared memory mapping yet. We need to look up
727
 
 *    the page we want to back the shared memory mapping with.
728
 
 *
729
 
 * Results:
730
 
 *    The page backing the user space address.
731
 
 *
732
 
 * Side effects:
733
 
 *    None.
734
 
 *
735
 
 *-----------------------------------------------------------------------------
736
 
 */
737
 
 
738
 
static struct page *
739
 
vmxnet3_shm_chardev_nopage(struct vm_area_struct *vma,
740
 
                unsigned long address,
741
 
                int unused)
742
 
{
743
 
        struct vmxnet3_shm_pool *shm = vma->vm_private_data;
744
 
        unsigned long idx = vmxnet3_shm_addr2idx(vma, address);
745
 
        struct page *pageptr;
746
 
 
747
 
        if (idx >= SHM_DATA_START && idx < SHM_DATA_START + shm->data.num_pages)
748
 
                pageptr = VMXNET3_SHM_IDX2PAGE(shm, idx - SHM_DATA_START);
749
 
        else if (idx >= SHM_CTL_START && idx < SHM_CTL_START + SHM_CTL_SIZE)
750
 
                pageptr = shm->ctl.pages[idx - SHM_CTL_START];
751
 
        else
752
 
                pageptr = NULL;
753
 
 
754
 
        if (pageptr)
755
 
                get_page(pageptr);
756
 
 
757
 
        return pageptr;
758
 
}
759
 
#endif
760
 
 
761
 
 
762
 
/*
763
 
 *-----------------------------------------------------------------------------
764
 
 *
765
 
 * vmxnet3_shm_chardev_mmap --
766
 
 *
767
 
 *    Set up the shared memory mmap.
768
 
 *
769
 
 * Results:
770
 
 *    Always 0.
771
 
 *
772
 
 * Side effects:
773
 
 *    None.
774
 
 *
775
 
 *-----------------------------------------------------------------------------
776
 
 */
777
 
static int
778
 
vmxnet3_shm_chardev_mmap(struct file *filp,
779
 
                struct vm_area_struct *vma)
780
 
{
781
 
        vma->vm_private_data = filp->private_data;
782
 
        vma->vm_ops = &vmxnet3_shm_vm_ops;
783
 
        vma->vm_flags |= VM_RESERVED;
784
 
        return 0;
785
 
}
786
 
 
787
 
 
788
 
/*
789
 
 *-----------------------------------------------------------------------------
790
 
 *
791
 
 * vmxnet3_shm_chardev_poll --
792
 
 *
793
 
 *    Poll called from user space. We consume the TX queue and then go to
794
 
 *    sleep until we get woken up by an interrupt.
795
 
 *
796
 
 * Results:
797
 
 *    Poll mask.
798
 
 *
799
 
 * Side effects:
800
 
 *    None.
801
 
 *
802
 
 *-----------------------------------------------------------------------------
803
 
 */
804
 
 
805
 
static unsigned int
806
 
vmxnet3_shm_chardev_poll(struct file *filp,
807
 
                poll_table *wait)
808
 
{
809
 
        struct vmxnet3_shm_pool *shm = filp->private_data;
810
 
        unsigned int mask = 0;
811
 
        unsigned long flags;
812
 
        struct vmxnet3_shm_ringentry *re;
813
 
 
814
 
        /* consume TX queue */
815
 
        if (vmxnet3_shm_consume_user_tx_queue(shm) == -1) {
816
 
                /*
817
 
                 * The device has been closed, let the user space
818
 
                 * know there is activity, so that it gets a chance
819
 
                 * to read the channelBad flag.
820
 
                 */
821
 
                mask |= POLLIN;
822
 
                return mask;
823
 
        }
824
 
 
825
 
        /* Wait on the rxq for an interrupt to wake us */
826
 
        poll_wait(filp, &shm->rxq, wait);
827
 
 
828
 
        /* Check if the user's current RX entry is full */
829
 
        spin_lock_irqsave(&shm->rx_lock, flags);
830
 
        re = user_rx_entry(shm);
831
 
        if (re->own)
832
 
                /*
                 * 'own' is set by the kernel once it has filled this RX
                 * entry, so user space has data to read.
                 */
833
 
                mask |= POLLIN | POLLRDNORM;
834
 
 
835
 
        spin_unlock_irqrestore(&shm->rx_lock, flags);
836
 
 
837
 
        return mask;
838
 
}
839
 
 
840
 
 
841
 
/*
842
 
 *-----------------------------------------------------------------------------
843
 
 *
844
 
 * vmxnet3_shm_chardev_ioctl --
845
 
 *
846
 
 *    Handle ioctls from user space.
847
 
 *
848
 
 * Results:
849
 
 *    Return code depends on ioctl.
850
 
 *
851
 
 * Side effects:
852
 
 *    None.
853
 
 *
854
 
 *-----------------------------------------------------------------------------
855
 
 */
856
 
 
857
 
static long
858
 
vmxnet3_shm_chardev_ioctl(struct file *filp,
859
 
                unsigned int cmd,
860
 
                unsigned long arg)
861
 
{
862
 
        struct vmxnet3_shm_pool *shm = filp->private_data;
863
 
        u16 idx;
864
 
        u16 idx1;
865
 
        int i;
866
 
 
867
 
        switch(cmd) {
868
 
 
869
 
                case SHM_IOCTL_TX:
870
 
                        vmxnet3_shm_consume_user_tx_queue(shm);
871
 
                        return 0;
872
 
 
873
 
                case SHM_IOCTL_ALLOC_ONE:
874
 
                        idx = vmxnet3_shm_alloc_page(shm);
875
 
                        return idx;
876
 
 
877
 
                case SHM_IOCTL_ALLOC_MANY:
878
 
                        for (i = 0; i < arg; i++) {
879
 
                                idx = vmxnet3_shm_alloc_page(shm);
880
 
                                if (idx != SHM_INVALID_IDX) {
881
 
                                        if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
882
 
                                                vmxnet3_shm_free_page(shm, idx);
883
 
                                                return SHM_INVALID_IDX;
884
 
                                        }
885
 
                                } else {
886
 
                                        return SHM_INVALID_IDX;
887
 
                                }
888
 
                        }
889
 
                        return 0;
890
 
 
891
 
                case SHM_IOCTL_ALLOC_ONE_AND_MANY:
892
 
                        idx1 = vmxnet3_shm_alloc_page(shm);
893
 
                        if (idx1 == SHM_INVALID_IDX)
894
 
                                return SHM_INVALID_IDX;
895
 
 
896
 
                        for (i = 0; i < arg - 1; i++) {
897
 
                                idx = vmxnet3_shm_alloc_page(shm);
898
 
                                if (idx != SHM_INVALID_IDX) {
899
 
                                        if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
900
 
                                                vmxnet3_shm_free_page(shm, idx);
901
 
                                                vmxnet3_shm_free_page(shm, idx1);
902
 
                                                return SHM_INVALID_IDX;
903
 
                                        }
904
 
                                } else {
905
 
                                        vmxnet3_shm_free_page(shm, idx1);
906
 
                                        return SHM_INVALID_IDX;
907
 
                                }
908
 
                        }
909
 
                        return idx1;
910
 
 
911
 
                case SHM_IOCTL_FREE_ONE:
912
 
                        if (arg != SHM_INVALID_IDX && arg < shm->data.num_pages)
913
 
                                vmxnet3_shm_free_page(shm, arg);
914
 
 
915
 
                        return 0;
916
 
        }
917
 
 
918
 
        return -ENOTTY;
919
 
}
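
/*
 * Illustrative ioctl usage from user space (hypothetical; the constants
 * come from vmxnet3_shm.h):
 *
 *   uint16_t idx = ioctl(fd, SHM_IOCTL_ALLOC_ONE, 0);
 *   ...
 *   ioctl(fd, SHM_IOCTL_FREE_ONE, idx);
 *
 * The ALLOC_* commands return SHM_INVALID_IDX once the pool is exhausted.
 */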
920
 
 
921
 
#ifndef HAVE_UNLOCKED_IOCTL
922
 
static int vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
923
 
                struct file *filp,
924
 
                unsigned int cmd,
925
 
                unsigned long arg)
926
 
{
927
 
        return vmxnet3_shm_chardev_ioctl(filp, cmd, arg);
928
 
}
929
 
#endif
930
 
 
931
 
/*
932
 
 *-----------------------------------------------------------------------------
933
 
 *
934
 
 * vmxnet3_shm_chardev_find_by_minor --
935
 
 *
936
 
 *    Find the right shared memory pool based on the minor number of the
937
 
 *    char device.
938
 
 *
939
 
 * Results:
940
 
 *    Pointer to the shared memory pool, or NULL on error.
941
 
 *
942
 
 * Side effects:
943
 
 *    Takes a reference on the kobj of the shm object.
944
 
 *
945
 
 *-----------------------------------------------------------------------------
946
 
 */
947
 
 
948
 
static struct vmxnet3_shm_pool *
949
 
vmxnet3_shm_chardev_find_by_minor(unsigned int minor)
950
 
{
951
 
        struct vmxnet3_shm_pool *shm, *tmp;
952
 
        unsigned long flags;
953
 
 
954
 
        spin_lock_irqsave(&vmxnet3_shm_list_lock, flags);
955
 
 
956
 
        list_for_each_entry_safe(shm, tmp, &vmxnet3_shm_list, list) {
957
 
                if (shm->misc_dev.minor == minor && kobject_get(&shm->kobj)) {
958
 
                        spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);
959
 
                        return shm;
960
 
                }
961
 
        }
962
 
 
963
 
        spin_unlock_irqrestore(&vmxnet3_shm_list_lock, flags);
964
 
 
965
 
        return NULL;
966
 
}
967
 
 
968
 
 
969
 
/*
970
 
 *-----------------------------------------------------------------------------
971
 
 *
972
 
 * vmxnet3_shm_chardev_open --
973
 
 *
974
 
 *    Find the right shared memory pool based on the minor number of the
975
 
 *    char device.
976
 
 *
977
 
 * Results:
978
 
 *    0 on success or -ENODEV if no device exists with the given minor number
979
 
 *
980
 
 * Side effects:
981
 
 *    None.
982
 
 *
983
 
 *-----------------------------------------------------------------------------
984
 
 */
985
 
 
986
 
static int
987
 
vmxnet3_shm_chardev_open(struct inode * inode,
988
 
                struct file * filp)
989
 
{
990
 
        /* Stash pointer to shm in file so file ops can use it */
991
 
        filp->private_data = vmxnet3_shm_chardev_find_by_minor(iminor(inode));
992
 
        if (filp->private_data == NULL)
993
 
                return -ENODEV;
994
 
 
995
 
 
996
 
        /*
         * The (disabled) line below would point the file's backing_dev_info
         * at the BDI used for directly mappable char devices.
         */
997
 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
998
 
        /*   filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi; */
999
 
#endif
1000
 
 
1001
 
        return 0;
1002
 
}
1003
 
 
1004
 
 
1005
 
/*
1006
 
 *-----------------------------------------------------------------------------
1007
 
 *
1008
 
 * vmxnet3_shm_chardev_release --
1009
 
 *
1010
 
 *    Closing the char device. Release the ref count on the shared memory
1011
 
 *    pool, perform cleanup.
1012
 
 *
1013
 
 * Results:
1014
 
 *    Always 0.
1015
 
 *
1016
 
 * Side effects:
1017
 
 *    None.
1018
 
 *
1019
 
 *-----------------------------------------------------------------------------
1020
 
 */
1021
 
 
1022
 
static int
1023
 
vmxnet3_shm_chardev_release(struct inode * inode,
1024
 
                struct file * filp)
1025
 
{
1026
 
        struct vmxnet3_shm_pool *shm = filp->private_data;
1027
 
 
1028
 
        if (shm->adapter) {
1029
 
                vmxnet3_shm_pool_reset(shm);
1030
 
        } else {
1031
 
                vmxnet3_shm_init_allocator(shm);
1032
 
                memset(shm->ctl.ptr, 0, PAGE_SIZE);
1033
 
        }
1034
 
 
1035
 
        kobject_put(&shm->kobj);
1036
 
 
1037
 
        return 0;
1038
 
}
1039
 
 
1040
 
 
1041
 
/* TX and RX */
1042
 
 
1043
 
/*
1044
 
 *-----------------------------------------------------------------------------
1045
 
 *
1046
 
 * vmxnet3_free_skbpages --
1047
 
 *
1048
 
 *    Free the shared memory pages (secretly) backing this skb.
1049
 
 *
1050
 
 * Results:
1051
 
 *    None.
1052
 
 *
1053
 
 * Side effects:
1054
 
 *    None.
1055
 
 *
1056
 
 *-----------------------------------------------------------------------------
1057
 
 */
1058
 
 
1059
 
void
1060
 
vmxnet3_free_skbpages(struct vmxnet3_adapter *adapter,
1061
 
                struct sk_buff *skb)
1062
 
{
1063
 
        int i;
1064
 
 
1065
 
        vmxnet3_shm_free_page(adapter->shm, VMXNET3_SHM_SKB_GETIDX(skb));
1066
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1067
 
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1068
 
 
1069
 
                vmxnet3_shm_free_page(adapter->shm, (unsigned long)frag->page);
1070
 
        }
1071
 
 
1072
 
        skb_shinfo(skb)->nr_frags = 0;
1073
 
}
1074
 
 
1075
 
 
1076
 
/*
1077
 
 *-----------------------------------------------------------------------------
1078
 
 *
1079
 
 * vmxnet3_shm_start_tx --
1080
 
 *
1081
 
 *    The shared memory vmxnet version of the hard_start_xmit routine.
1082
 
 *    Just frees the given packet as we do not intend to transmit any
1083
 
 *    packet given to us by the TCP/IP stack.
1084
 
 *
1085
 
 * Results:
1086
 
 *    Always 0 for success.
1087
 
 *
1088
 
 * Side effects:
1089
 
 *    None.
1090
 
 *
1091
 
 *-----------------------------------------------------------------------------
1092
 
 */
1093
 
 
1094
 
int
1095
 
vmxnet3_shm_start_tx(struct sk_buff *skb,
1096
 
                struct net_device *dev)
1097
 
{
1098
 
        compat_dev_kfree_skb_irq(skb, FREE_WRITE);
1099
 
        return COMPAT_NETDEV_TX_OK;
1100
 
}
1101
 
 
1102
 
 
1103
 
/*
1104
 
 *-----------------------------------------------------------------------------
1105
 
 *
1106
 
 * vmxnet3_shm_tx_pkt --
1107
 
 *
1108
 
 *    Send a packet (collection of ring entries) using h/w tx routine.
1109
 
 *
1110
 
 *    Protected by shm.tx_lock
1111
 
 *
1112
 
 * Results:
1113
 
 *    0 on success. Negative value to indicate error
1114
 
 *
1115
 
 * Side effects:
1116
 
 *    None.
1117
 
 *
1118
 
 *-----------------------------------------------------------------------------
1119
 
 */
1120
 
 
1121
 
static int
1122
 
vmxnet3_shm_tx_pkt(struct vmxnet3_adapter *adapter,
1123
 
                struct vmxnet3_shm_ringentry *res,
1124
 
                int frags)
1125
 
{
1126
 
        struct sk_buff* skb;
1127
 
        int i;
1128
 
 
1129
 
        skb = dev_alloc_skb(100);
1130
 
        if (skb == NULL) {
1131
 
                for (i = 0; i < frags; i++)
1132
 
                        vmxnet3_shm_free_page(adapter->shm, res[i].idx);
1133
 
 
1134
 
                BUG_ON(TRUE);
1135
 
                return -ENOMEM;
1136
 
        }
1137
 
 
1138
 
        VMXNET3_SHM_SKB_SETIDX(skb, res[0].idx);
1139
 
        VMXNET3_SHM_SKB_SETLEN(skb, res[0].len);
1140
 
 
1141
 
        for (i = 1; i < frags; i++) {
1142
 
                struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
1143
 
                        skb_shinfo(skb)->nr_frags;
1144
 
 
1145
 
                BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
1146
 
 
1147
 
                frag->page = (struct page*)(unsigned long)res[i].idx;
1148
 
                frag->page_offset = 0;
1149
 
                frag->size = res[i].len;
1150
 
                skb_shinfo(skb)->nr_frags ++;
1151
 
        }
1152
 
 
1153
 
        {
1154
 
                struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
1155
 
                int ret;
1156
 
                skb->protocol = htons(ETH_P_IPV6);
1157
 
                adapter->shm->ctl.ptr->stats.kernel_tx += frags; /* XXX: move to better place */
1158
 
                ret = vmxnet3_shm_tq_xmit(skb, tq, adapter, adapter->netdev);
1159
 
                if (ret == COMPAT_NETDEV_TX_BUSY)
1160
 
                        vmxnet3_dev_kfree_skb(adapter, skb);
1161
 
 
1162
 
                return ret;
1163
 
        }
1164
 
 
1165
 
        return 0;
1166
 
}
1167
 
 
1168
 
/*
1169
 
 *-----------------------------------------------------------------------------
1170
 
 *
1171
 
 * vmxnet3_shm_tq_xmit --
1172
 
 *
1173
 
 *    Wrap vmxnet3_tq_xmit holding the netdev tx lock to better emulate the
1174
 
 *    Linux stack. Also check for a stopped tx queue to avoid racing with
1175
 
 *    vmxnet3_close.
1176
 
 *
1177
 
 * Results:
1178
 
 *    Same as vmxnet3_tq_xmit.
1179
 
 *
1180
 
 * Side effects:
1181
 
 *    None.
1182
 
 *
1183
 
 *-----------------------------------------------------------------------------
1184
 
 */
1185
 
int
1186
 
vmxnet3_shm_tq_xmit(struct sk_buff *skb,
1187
 
                struct vmxnet3_tx_queue *tq,
1188
 
                struct vmxnet3_adapter *adapter,
1189
 
                struct net_device *netdev)
1190
 
{
1191
 
        int ret = COMPAT_NETDEV_TX_BUSY;
1192
 
        compat_netif_tx_lock(netdev);
1193
 
        if (!netif_queue_stopped(netdev))
1194
 
                ret = vmxnet3_tq_xmit(skb, tq, adapter, netdev);
1195
 
 
1196
 
        compat_netif_tx_unlock(netdev);
1197
 
        return ret;
1198
 
}
1199
 
 
1200
 
 
1201
 
/*
1202
 
 *-----------------------------------------------------------------------------
1203
 
 *
1204
 
 * vmxnet3_shm_tx_re --
1205
 
 *
1206
 
 *    Add one entry to the partial TX array. If re->eop is set, i.e. if
1207
 
 *    the packet is complete, TX the partial packet.
1208
 
 *
1209
 
 * Results:
1210
 
 *    1 if the entry had eop set (the complete packet was handed to the device), 0 otherwise.
1211
 
 *
1212
 
 * Side effects:
1213
 
 *    None.
1214
 
 *
1215
 
 *-----------------------------------------------------------------------------
1216
 
 */
1217
 
 
1218
 
static int
1219
 
vmxnet3_shm_tx_re(struct vmxnet3_shm_pool *shm,
1220
 
                struct vmxnet3_shm_ringentry re)
1221
 
{
1222
 
        int i;
1223
 
 
1224
 
        BUG_ON(shm->partial_tx.frags > VMXNET3_SHM_MAX_FRAGS);
1225
 
        if (shm->partial_tx.frags == VMXNET3_SHM_MAX_FRAGS) {
1226
 
                if (re.eop) {
1227
 
                        printk("dropped oversize shm packet\n");
1228
 
                        for (i = 0; i < shm->partial_tx.frags; i++)
1229
 
                                vmxnet3_shm_free_page(shm,
1230
 
                                                shm->partial_tx.res[i].idx);
1231
 
 
1232
 
                        shm->partial_tx.frags = 0;
1233
 
                }
1234
 
                vmxnet3_shm_free_page(shm, re.idx);
1235
 
                return re.eop;
1236
 
        }
1237
 
 
1238
 
        shm->partial_tx.res[shm->partial_tx.frags++] = re;
1239
 
 
1240
 
        if (re.eop) {
1241
 
                int status = vmxnet3_shm_tx_pkt(shm->adapter,
1242
 
                                shm->partial_tx.res,
1243
 
                                shm->partial_tx.frags);
1244
 
                if (status < 0)
1245
 
                        printk(KERN_DEBUG "vmxnet3_shm_tx_pkt failed %d\n", status);
1246
 
 
1247
 
                shm->partial_tx.frags = 0;
1248
 
                return 1;
1249
 
        }
1250
 
 
1251
 
        return 0;
1252
 
}
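
/*
 * Fragments accumulate in shm->partial_tx until an entry with eop arrives;
 * the assembled packet is then pushed to the device via vmxnet3_shm_tx_pkt.
 * Packets exceeding VMXNET3_SHM_MAX_FRAGS fragments are dropped and their
 * pages returned to the allocator.
 */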
1253
 
 
1254
 
 
1255
 
/*
1256
 
 *-----------------------------------------------------------------------------
1257
 
 *
1258
 
 * vmxnet3_shm_consume_user_tx_queue --
1259
 
 *
1260
 
 *    Consume all packets in the user TX queue and send full
1261
 
 *    packets to the device
1262
 
 *
1263
 
 * Results:
1264
 
 *    0 on success, -1 if the device has already been closed.
1265
 
 *
1266
 
 * Side effects:
1267
 
 *    None.
1268
 
 *
1269
 
 *-----------------------------------------------------------------------------
1270
 
 */
1271
 
 
1272
 
static int
1273
 
vmxnet3_shm_consume_user_tx_queue(struct vmxnet3_shm_pool *shm)
1274
 
{
1275
 
        unsigned long flags;
1276
 
        struct vmxnet3_shm_ringentry *re;
1277
 
 
1278
 
        spin_lock_irqsave(&shm->tx_lock, flags);
1279
 
 
1280
 
        /* Check if the device has been closed */
1281
 
        if (shm->adapter == NULL) {
1282
 
                spin_unlock_irqrestore(&shm->tx_lock, flags);
1283
 
                return -1;
1284
 
        }
1285
 
 
1286
 
        /*
1287
 
         * Loop through each full entry in the user TX ring. Discard trash frags and
1288
 
         * add the others to the partial TX array. If an entry has eop set, TX the
1289
 
         * partial packet.
1290
 
         */
1291
 
        while ((re = kernel_tx_entry(shm))->own) {
1292
 
                if (re->trash) {
1293
 
                        vmxnet3_shm_free_page(shm, re->idx);
1294
 
                        shm->ctl.ptr->stats.kernel_tx++;
1295
 
                } else {
1296
 
                        vmxnet3_shm_tx_re(shm, *re);
1297
 
                }
1298
 
                inc_kernel_tx_idx(shm);
1299
 
                *re = RE_ZERO;
1300
 
        }
1301
 
 
1302
 
        spin_unlock_irqrestore(&shm->tx_lock, flags);
1303
 
 
1304
 
        return 0;
1305
 
}
1306
 
 
1307
 
 
1308
 
/*
1309
 
 *-----------------------------------------------------------------------------
1310
 
 *
1311
 
 * vmxnet3_shm_user_desc_available --
1312
 
 *
1313
 
 *    Checks if we have num_entries ring entries available on the rx ring.
1314
 
 *
1315
 
 * Results:
1316
 
 *    0 for yes
1317
 
 *    -ENOMEM for not enough entries available
1318
 
 *
1319
 
 * Side effects:
1320
 
 *    None.
1321
 
 *
1322
 
 *-----------------------------------------------------------------------------
1323
 
 */
1324
 
 
1325
 
static int
1326
 
vmxnet3_shm_user_desc_available(struct vmxnet3_shm_pool *shm,
1327
 
                u16 num_entries)
1328
 
{
1329
 
        struct vmxnet3_shm_ringentry *re;
1330
 
        u16 reIdx = kernel_rx_idx(shm);
1331
 
 
1332
 
        while (num_entries > 0) {
1333
 
                re = &shm->ctl.ptr->rx_ring[reIdx];
1334
 
                if (re->own)
1335
 
                        return -ENOMEM;
1336
 
 
1337
 
                reIdx = (reIdx + 1) % SHM_RX_RING_SIZE;
1338
 
                num_entries--;
1339
 
        }
1340
 
 
1341
 
        return 0;
1342
 
}
1343
 
 
1344
 
 
1345
 
/*
1346
 
 *-----------------------------------------------------------------------------
1347
 
 *
1348
 
 * vmxnet3_shm_rx_skb --
1349
 
 *
1350
 
 *    Receives an skb into the rx ring. If we can't receive all fragments,
1351
 
 *    the entire skb is dropped.
1352
 
 *
1353
 
 * Results:
1354
 
 *    0 for success
1355
 
 *    -ENOMEM for not enough entries available
1356
 
 *
1357
 
 * Side effects:
1358
 
 *    None.
1359
 
 *
1360
 
 *-----------------------------------------------------------------------------
1361
 
 */
1362
 
 
1363
 
int
1364
 
vmxnet3_shm_rx_skb(struct vmxnet3_adapter *adapter,
1365
 
                struct sk_buff *skb)
1366
 
{
1367
 
        int ret;
1368
 
        int i;
1369
 
        int num_entries = 1 + skb_shinfo(skb)->nr_frags;
1370
 
        int eop = (num_entries == 1);
1371
 
 
1372
 
        if (vmxnet3_shm_user_desc_available(adapter->shm, num_entries) == -ENOMEM) {
1373
 
                vmxnet3_dev_kfree_skb_irq(adapter, skb);
1374
 
                return -ENOMEM;
1375
 
        }
1376
 
 
1377
 
        ret = vmxnet3_shm_user_rx(adapter->shm,
1378
 
                        VMXNET3_SHM_SKB_GETIDX(skb),
1379
 
                        VMXNET3_SHM_SKB_GETLEN(skb),
1380
 
                        0 /* trash */,
1381
 
                        eop);
1382
 
        if (ret != 0) {
1383
 
                BUG_ON(TRUE);
1384
 
                printk(KERN_ERR "vmxnet3_shm_user_rx failed on frag 0\n");
1385
 
        }
1386
 
 
1387
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1388
 
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1389
 
                unsigned long shm_idx = (unsigned long)frag->page;
1390
 
 
1391
 
                eop = (i == skb_shinfo(skb)->nr_frags - 1);
1392
 
 
1393
 
                ret = vmxnet3_shm_user_rx(adapter->shm,
1394
 
                                shm_idx,
1395
 
                                frag->size,
1396
 
                                0 /* trash */,
1397
 
                                eop);
1398
 
                if (ret != 0) {
1399
 
                        BUG_ON(TRUE);
1400
 
                        printk(KERN_ERR "vmxnet3_shm_user_rx failed on frag 1+\n");
1401
 
                }
1402
 
        }
1403
 
 
1404
 
 
1405
 
        /*
1406
 
         * Do NOT use the vmxnet3 version of kfree_skb, as we handed
1407
 
         * ownership of shm pages to the user space, thus we must not
1408
 
         * free them again.
1409
 
         */
1410
 
        skb_shinfo(skb)->nr_frags = 0;
1411
 
        compat_dev_kfree_skb_irq(skb, FREE_WRITE);
1412
 
 
1413
 
        return 0;
1414
 
}
1415
 
 
1416
 
 
1417
 
/*
1418
 
 *-----------------------------------------------------------------------------
1419
 
 *
1420
 
 * vmxnet3_shm_user_rx --
1421
 
 *
1422
 
 *    Put one packet fragment into the shared memory RX ring
1423
 
 *
1424
 
 * Results:
1425
 
 *    0 on success.
1426
 
 *    Negative value on error.
1427
 
 *
1428
 
 * Side effects:
1429
 
 *    None.
1430
 
 *
1431
 
 *-----------------------------------------------------------------------------
1432
 
 */
1433
 
 
1434
 
int
1435
 
vmxnet3_shm_user_rx(struct vmxnet3_shm_pool *shm,
1436
 
                uint16 idx,
1437
 
                uint16 len,
1438
 
                int trash,
1439
 
                int eop)
1440
 
{
1441
 
        struct vmxnet3_shm_ringentry *re = kernel_rx_entry(shm);
1442
 
        shm->ctl.ptr->stats.kernel_rx++;
1443
 
        if (re->own)
1444
 
                return -ENOMEM;
1445
 
 
1446
 
        inc_kernel_rx_idx(shm);
1447
 
        re->idx = idx;
1448
 
        re->len = len;
1449
 
        re->trash = trash;
1450
 
        re->eop = eop;
1451
 
        re->own = TRUE;
1452
 
        return 0;
1453
 
}
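
/*
 * This is the producer side of the shared RX ring: the kernel fills the
 * entry and then flips 'own', at which point vmxnet3_shm_chardev_poll
 * reports POLLIN and user space may consume the entry.
 */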
1454
 
 
1455
 
 
1456
 
/*
1457
 
 *-----------------------------------------------------------------------------
1458
 
 *
1459
 
 * vmxnet3_shm_open --
1460
 
 *
1461
 
 *    Called when the vmxnet3 device is opened. Allocates the per-device
1462
 
 *    shared memory pool.
1463
 
 *
1464
 
 * Results:
1465
 
 *    0 on success.
1466
 
 *    Negative value on error.
1467
 
 *
1468
 
 * Side effects:
1469
 
 *    None.
1470
 
 *
1471
 
 *-----------------------------------------------------------------------------
1472
 
 */
1473
 
 
1474
 
int
1475
 
vmxnet3_shm_open(struct vmxnet3_adapter *adapter,
1476
 
                char *name, int pool_size)
1477
 
{
1478
 
        if (pool_size > SHM_MAX_DATA_SIZE) {
1479
 
                printk(KERN_ERR "vmxnet3_shm: requested pool size %d is larger than the maximum %d\n",
1480
 
                       pool_size, SHM_MAX_DATA_SIZE);
1481
 
                return -EINVAL;
1482
 
        }
1483
 
 
1484
 
        adapter->shm = vmxnet3_shm_pool_create(adapter, name, pool_size);
1485
 
        if (adapter->shm == NULL) {
1486
 
                printk(KERN_ERR "failed to create shared memory pool\n");
1487
 
                return -ENOMEM;
1488
 
        }
1489
 
        return 0;
1490
 
}
1491
 
 
1492
 
 
1493
 
/*
1494
 
 *-----------------------------------------------------------------------------
1495
 
 *
1496
 
 * vmxnet3_shm_close --
1497
 
 *
1498
 
 *    Called when the vmxnet3 device is closed. Does not free the per-device
1499
 
 *    shared memory pool. The character device might still be open. Thus
1500
 
 *    freeing the shared memory pool is tied to the ref count on
1501
 
 *    shm->kobj dropping to zero instead.
1502
 
 *
1503
 
 * Results:
1504
 
 *    0 on success.
1505
 
 *    Negative value on error.
1506
 
 *
1507
 
 * Side effects:
1508
 
 *    None.
1509
 
 *
1510
 
 *-----------------------------------------------------------------------------
1511
 
 */
1512
 
 
1513
 
int
1514
 
vmxnet3_shm_close(struct vmxnet3_adapter *adapter)
1515
 
{
1516
 
        unsigned long flags;
1517
 
 
1518
 
        /* Can't unset the adapter pointer while a TX is in progress */
1519
 
        spin_lock_irqsave(&adapter->shm->tx_lock, flags);
1520
 
        adapter->shm->adapter = NULL;
1521
 
        spin_unlock_irqrestore(&adapter->shm->tx_lock, flags);
1522
 
 
1523
 
        /* Mark the channel as 'in bad state'  */
1524
 
        adapter->shm->ctl.ptr->channelBad = 1;
1525
 
 
1526
 
        kobject_put(&adapter->shm->kobj);
1527
 
 
1528
 
        wake_up(&adapter->shm->rxq);
1529
 
 
1530
 
        return 0;
1531
 
}