static int
vmxnet3_shm_consume_user_tx_queue(struct vmxnet3_shm_pool *shm);

static int
vmxnet3_shm_tq_xmit(struct sk_buff *skb,
		    struct vmxnet3_tx_queue *tq,
		    struct vmxnet3_adapter *adapter,
		    struct net_device *netdev);
static unsigned int vmxnet3_shm_chardev_poll(struct file *filp,
					     poll_table *wait);

static long vmxnet3_shm_chardev_ioctl(struct file *filp,
				      unsigned int cmd,
				      unsigned long arg);

#ifndef HAVE_UNLOCKED_IOCTL
static int vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
					 struct file *filp,
					 unsigned int cmd,
					 unsigned long arg);
#endif
static struct file_operations shm_fops = {
	.owner = THIS_MODULE,
	.open = vmxnet3_shm_chardev_open,
	.release = vmxnet3_shm_chardev_release,
	.poll = vmxnet3_shm_chardev_poll,
#ifdef HAVE_UNLOCKED_IOCTL
	.unlocked_ioctl = vmxnet3_shm_chardev_ioctl,
	.compat_ioctl = vmxnet3_shm_chardev_ioctl,
#else
	.ioctl = vmxnet3_shm_chardev_old_ioctl,
#endif
};

static LIST_HEAD(vmxnet3_shm_list);
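// For orientation: a userspace client drives this char device with open(2),
// poll(2), and ioctl(2). A minimal sketch; the device node name below is a
// placeholder, not taken from this file:
//
//	int shm_fd = open("/dev/vmxnet3_shm", O_RDWR);	// hypothetical node
//	if (shm_fd < 0)
//		err(1, "open");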
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_shm_init_allocator --
 *
 *    Zero all shared memory data pages and fill the allocator with them.
 *
 *----------------------------------------------------------------------------
 */

static void
vmxnet3_shm_init_allocator(struct vmxnet3_shm_pool *shm)
{
	int i;

	shm->allocator.count = 0;
	for (i = 1; i < shm->data.num_pages; i++) {
		struct page *page = VMXNET3_SHM_IDX2PAGE(shm, i);
		void *virt = kmap(page);
		memset(virt, 0, PAGE_SIZE);
		kunmap(page);

		shm->allocator.stack[shm->allocator.count++] = i;

		VMXNET3_ASSERT(i != SHM_INVALID_IDX);
	}
	VMXNET3_ASSERT(shm->allocator.count <= SHM_DATA_SIZE);
}
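// The allocator above is a LIFO stack of free page indices: init fills the
// stack with every data page, and allocation/free pop and push indices. A
// minimal sketch of that pairing (assumed shape, not the driver's verbatim
// alloc/free code):
//
//	static u16 vmxnet3_shm_alloc_page(struct vmxnet3_shm_pool *shm)
//	{
//		if (shm->allocator.count == 0)
//			return SHM_INVALID_IDX;		// pool exhausted
//		return shm->allocator.stack[--shm->allocator.count];
//	}
//
//	static void vmxnet3_shm_free_page(struct vmxnet3_shm_pool *shm, u16 idx)
//	{
//		VMXNET3_ASSERT(shm->allocator.count < SHM_DATA_SIZE);
//		shm->allocator.stack[shm->allocator.count++] = idx;
//	}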
/*
 *-----------------------------------------------------------------------------
 *
 * vmxnet3_shm_pool_reset --
 *
 *    Clean up after userspace has closed the device.
 *
 *-----------------------------------------------------------------------------
 */
static void
vmxnet3_shm_pool_reset(struct vmxnet3_shm_pool *shm)
{
	int err = 0;

	printk(KERN_INFO "resetting shm pool\n");

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING,
				&shm->adapter->state)) {
		msleep(1);	// wait body elided in the original; msleep assumed
	}

	if (compat_netif_running(shm->adapter->netdev)) {
		vmxnet3_quiesce_dev(shm->adapter);
	}

	vmxnet3_shm_init_allocator(shm);

	if (compat_netif_running(shm->adapter->netdev)) {
		err = vmxnet3_activate_dev(shm->adapter);
	}

	memset(shm->ctl.ptr, 0, PAGE_SIZE);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &shm->adapter->state);

	if (err) {
		vmxnet3_force_close(shm->adapter);
	}
}
/*
 *-----------------------------------------------------------------------------
 *
 * vmxnet3_shm_pool_create --
 *
 *    Allocate and initialize the shared memory pool.
 *
 *-----------------------------------------------------------------------------
 */
	// Allocate data pages
	shm->data.num_pages = SHM_DATA_SIZE;
	for (i = 1; i < shm->data.num_pages; i++) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (page == NULL) {
			goto fail;	// error path; label name assumed
		}

		VMXNET3_SHM_SET_IDX2PAGE(shm, i, page);

		VMXNET3_ASSERT(i != SHM_INVALID_IDX);
	}

	// Allocate control page
	ctl_page = alloc_page(GFP_KERNEL);
	if (ctl_page == NULL) {
		goto fail;	// NULL check reconstructed
	}
	ctl_ptr = (void *)kmap(ctl_page);
	shm->ctl.pages[0] = ctl_page;
	shm->ctl.ptr = ctl_ptr;

	// Reset data and control pages
	vmxnet3_shm_init_allocator(shm);
	memset(shm->ctl.ptr, 0, PAGE_SIZE);

	// Register char device
	shm->misc_dev.minor = MISC_DYNAMIC_MINOR;
	shm->misc_dev.name = shm->name;
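// The fragment above stops at the miscdevice setup. Registration presumably
// completes with the standard Linux misc API; a sketch under that assumption:
//
//	shm->misc_dev.fops = &shm_fops;
//	if (misc_register(&shm->misc_dev) != 0) {
//		printk(KERN_ERR "failed to register shm char device\n");
//		// unwind the page allocations here
//	}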
	// In vmxnet3_shm_chardev_poll:
	struct vmxnet3_shm_ringentry *re;

	// consume TX queue
	if (vmxnet3_shm_consume_user_tx_queue(shm) == -1) {
		// the device has been closed, let the user space
		// know there is activity, so that it gets a chance
		// to read the channelBad flag.
		return POLLIN;	// assumed: report readiness immediately
	}

	// Wait on the rxq for an interrupt to wake us
	poll_wait(filp, &shm->rxq, wait);
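// From userspace this poll hook pairs with a conventional event loop. A
// hedged sketch; the mapped control page pointer and the RX drain helper are
// assumptions, not names from this file:
//
//	struct pollfd pfd = { .fd = shm_fd, .events = POLLIN };
//	while (poll(&pfd, 1, -1) >= 0) {
//		if (ctl->channelBad)	// control page mapped into userspace
//			break;		// device was reset or closed
//		drain_rx_ring();	// hypothetical helper
//	}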
	// In vmxnet3_shm_chardev_ioctl: page allocation commands.
	case SHM_IOCTL_ALLOC_ONE:
		idx = vmxnet3_shm_alloc_page(shm);
		return idx;

	case SHM_IOCTL_ALLOC_MANY:
		for (i = 0; i < arg; i++) {
			idx = vmxnet3_shm_alloc_page(shm);
			if (idx != SHM_INVALID_IDX) {
				if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
					vmxnet3_shm_free_page(shm, idx);
					return SHM_INVALID_IDX;
				}
			} else {
				return SHM_INVALID_IDX;
			}
		}
		return 0;

	case SHM_IOCTL_ALLOC_ONE_AND_MANY:
		idx1 = vmxnet3_shm_alloc_page(shm);
		if (idx1 == SHM_INVALID_IDX) {
			return SHM_INVALID_IDX;
		}
		for (i = 0; i < arg - 1; i++) {
			idx = vmxnet3_shm_alloc_page(shm);
			if (idx != SHM_INVALID_IDX) {
				if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
					vmxnet3_shm_free_page(shm, idx);
					vmxnet3_shm_free_page(shm, idx1);
					return SHM_INVALID_IDX;
				}
			} else {
				vmxnet3_shm_free_page(shm, idx1);
				return SHM_INVALID_IDX;
			}
		}
		return idx1;	// success return reconstructed
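// Usage from the userspace side of the channel, assuming the ioctl numbers
// above are exported in a shared header (a sketch, not verbatim driver code):
//
//	// reserve one page for TX plus (n - 1) pages queued for RX;
//	// on any failure the driver rolls back what it allocated
//	int idx1 = ioctl(shm_fd, SHM_IOCTL_ALLOC_ONE_AND_MANY, n);
//	if (idx1 == SHM_INVALID_IDX)
//		handle_pool_exhaustion();	// hypothetical helper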
#ifndef HAVE_UNLOCKED_IOCTL
static int
vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
			      struct file *filp,
			      unsigned int cmd,
			      unsigned long arg)
{
	return vmxnet3_shm_chardev_ioctl(filp, cmd, arg);
}
#endif
/*
 *-----------------------------------------------------------------------------
 *
 * vmxnet3_shm_chardev_release --
 *
 *    Reset the pool when userspace closes the char device, then drop its
 *    reference on the pool.
 *
 *-----------------------------------------------------------------------------
 */

static int
vmxnet3_shm_chardev_release(struct inode *inode,
			    struct file *filp)
{
	struct vmxnet3_shm_pool *shm = filp->private_data;

	if (shm->adapter) {	// guard reconstructed; reset needs a live adapter
		vmxnet3_shm_pool_reset(shm);
	} else {
		vmxnet3_shm_init_allocator(shm);
		memset(shm->ctl.ptr, 0, PAGE_SIZE);
	}

	kobject_put(&shm->kobj);

	return 0;
}
	// In vmxnet3_shm_tx_pkt: transmit a packet assembled from shm pages.
	struct vmxnet3_tx_queue *tq = &adapter->tx_queue;

	skb->protocol = htons(ETH_P_IPV6);
	adapter->shm->ctl.ptr->stats.kernel_tx += frags; // XXX: move to better place

	ret = vmxnet3_shm_tq_xmit(skb, tq, adapter, adapter->netdev);
	if (ret == COMPAT_NETDEV_TX_BUSY) {
		vmxnet3_dev_kfree_skb(adapter, skb);
	}
/*
 *-----------------------------------------------------------------------------
 *
 * vmxnet3_shm_tq_xmit --
 *
 *    Wrap vmxnet3_tq_xmit holding the netdev tx lock to better emulate the
 *    Linux stack. Also check for a stopped tx queue to avoid racing with
 *    the device close path.
 *
 * Results:
 *    Same as vmxnet3_tq_xmit.
 *
 *-----------------------------------------------------------------------------
 */
static int
vmxnet3_shm_tq_xmit(struct sk_buff *skb,
		    struct vmxnet3_tx_queue *tq,
		    struct vmxnet3_adapter *adapter,
		    struct net_device *netdev)
{
	int ret = COMPAT_NETDEV_TX_BUSY;

	compat_netif_tx_lock(netdev);
	if (!netif_queue_stopped(netdev)) {
		ret = vmxnet3_tq_xmit(skb, tq, adapter, netdev);
	}
	compat_netif_tx_unlock(netdev);

	return ret;
}
/*
 *-----------------------------------------------------------------------------
 */
	// Flush the pending partial TX packet, if any.
	status = vmxnet3_shm_tx_pkt(shm->adapter,
				    shm->partial_tx.res,
				    shm->partial_tx.frags);
	if (status < 0) {
		VMXNET3_LOG("vmxnet3_shm_tx_pkt failed %d\n", status);
	}
	shm->partial_tx.frags = 0;
	// Adapter close path: detach the pool from the adapter under the tx lock.
	adapter->shm->adapter = NULL;
	spin_unlock_irqrestore(&adapter->shm->tx_lock, flags);

	// Mark the channel as 'in bad state'
	adapter->shm->ctl.ptr->channelBad = 1;

	kobject_put(&adapter->shm->kobj);

	wake_up(&adapter->shm->rxq);
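// The wake_up() above unblocks any userspace thread sleeping in poll() on the
// rxq; on wakeup it is expected to see channelBad == 1 and stop using the
// channel (see the poll sketch earlier). A hedged userspace reaction, assuming
// the control page is mapped as ctl:
//
//	if (ctl->channelBad) {
//		close(shm_fd);	// tear down; the pool resets on release
//	}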