~ubuntu-branches/ubuntu/quantal/open-vm-tools/quantal-201207201942

Viewing changes to modules/linux/vmxnet3/vmxnet3_shm.c

  • Committer: Bazaar Package Importer
  • Author(s): Daniel Baumann
  • Date: 2009-10-18 12:28:19 UTC
  • mfrom: (1.1.7 upstream) (2.4.9 squeeze)
  • Revision ID: james.westby@ubuntu.com-20091018122819-00vqew6m0ztpqcqp
Tags: 2009.10.15-201664-1
Merging upstream version 2009.10.15-201664.

@@ -71,6 +71,11 @@
 static int
 vmxnet3_shm_consume_user_tx_queue(struct vmxnet3_shm_pool *shm);
 
+int
+vmxnet3_shm_tq_xmit(struct sk_buff *skb,
+                    struct vmxnet3_tx_queue *tq,
+                    struct vmxnet3_adapter *adapter,
+                    struct net_device *netdev);
 
 /*
  *----------------------------------------------------------------------------
@@ -290,10 +295,16 @@
 static unsigned int vmxnet3_shm_chardev_poll(struct file *filp,
                                              poll_table *wait);
 
-static int vmxnet3_shm_chardev_ioctl(struct inode *inode,
-                                     struct file *filp,
-                                     unsigned int cmd,
-                                     unsigned long arg);
+static long vmxnet3_shm_chardev_ioctl(struct file *filp,
+                                      unsigned int cmd,
+                                      unsigned long arg);
+
+#ifndef HAVE_UNLOCKED_IOCTL
+static int vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
+                                         struct file *filp,
+                                         unsigned int cmd,
+                                         unsigned long arg);
+#endif
 
 static struct file_operations shm_fops = {
    .owner = THIS_MODULE,

@@ -301,7 +312,14 @@
    .open = vmxnet3_shm_chardev_open,
    .release = vmxnet3_shm_chardev_release,
    .poll = vmxnet3_shm_chardev_poll,
-   .ioctl = vmxnet3_shm_chardev_ioctl,
+#ifdef HAVE_UNLOCKED_IOCTL
+   .unlocked_ioctl = vmxnet3_shm_chardev_ioctl,
+#ifdef CONFIG_COMPAT
+   .compat_ioctl = vmxnet3_shm_chardev_ioctl,
+#endif
+#else
+   .ioctl = vmxnet3_shm_chardev_old_ioctl,
+#endif
 };
 
 static LIST_HEAD(vmxnet3_shm_list);
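
The fops hunk above is the standard migration from the BKL-protected .ioctl hook to .unlocked_ioctl (available since kernel 2.6.11 and signalled by HAVE_UNLOCKED_IOCTL): the new handler returns long, no longer receives the inode, and can double as the .compat_ioctl entry for 32-bit userspace. A minimal sketch of the same pattern for a generic char device; all mydrv_* names are invented for illustration:

#include <linux/fs.h>
#include <linux/module.h>

/* Sketch of the ioctl migration pattern above; mydrv_* names are
 * hypothetical. The unlocked handler returns long and takes no inode. */
static long mydrv_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

#ifndef HAVE_UNLOCKED_IOCTL
/* Older kernels only know the BKL-protected entry point: keep it as a
 * thin wrapper so one handler serves both worlds. */
static int mydrv_old_ioctl(struct inode *inode, struct file *filp,
                           unsigned int cmd, unsigned long arg)
{
   return mydrv_ioctl(filp, cmd, arg);
}
#endif

static struct file_operations mydrv_fops = {
   .owner = THIS_MODULE,
#ifdef HAVE_UNLOCKED_IOCTL
   .unlocked_ioctl = mydrv_ioctl,   /* called without the Big Kernel Lock */
#ifdef CONFIG_COMPAT
   .compat_ioctl = mydrv_ioctl,     /* 32-bit userspace on 64-bit kernels */
#endif
#else
   .ioctl = mydrv_old_ioctl,        /* legacy entry point */
#endif
};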
@@ -323,6 +341,88 @@
 
 
 /*
+ *----------------------------------------------------------------------------
+ *
+ * vmxnet3_shm_init_allocator --
+ *
+ * Zero all shared memory data pages and fill the allocator with them.
+ *
+ * Result:
+ *    None
+ *
+ *----------------------------------------------------------------------------
+ */
+
+static void
+vmxnet3_shm_init_allocator(struct vmxnet3_shm_pool *shm)
+{
+   int i;
+
+   shm->allocator.count = 0;
+   for (i = 1; i < shm->data.num_pages; i++) {
+      struct page *page = VMXNET3_SHM_IDX2PAGE(shm, i);
+      void *virt = kmap(page);
+      memset(virt, 0, PAGE_SIZE);
+      kunmap(page);
+
+      shm->allocator.stack[shm->allocator.count++] = i;
+
+      VMXNET3_ASSERT(i != SHM_INVALID_IDX);
+   }
+   VMXNET3_ASSERT(shm->allocator.count <= SHM_DATA_SIZE);
+}
+
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * vmxnet3_shm_pool_reset --
+ *
+ *    Clean up after userspace has closed the device
+ *
+ * Results:
+ *    None.
+ *
+ * Side effects:
+ *    None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static void
+vmxnet3_shm_pool_reset(struct vmxnet3_shm_pool *shm)
+{
+   int err = 0;
+   printk(KERN_INFO "resetting shm pool\n");
+
+   /*
+    * Reset_work may be in the middle of resetting the device, wait for its
+    * completion.
+    */
+   while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &shm->adapter->state)) {
+      compat_msleep(1);
+   }
+
+   if (compat_netif_running(shm->adapter->netdev)) {
+      vmxnet3_quiesce_dev(shm->adapter);
+   }
+
+   vmxnet3_shm_init_allocator(shm);
+
+   if (compat_netif_running(shm->adapter->netdev)) {
+      err = vmxnet3_activate_dev(shm->adapter);
+   }
+
+   memset(shm->ctl.ptr, 0, PAGE_SIZE);
+
+   clear_bit(VMXNET3_STATE_BIT_RESETTING, &shm->adapter->state);
+
+   if (err) {
+      vmxnet3_force_close(shm->adapter);
+   }
+}
+
+/*
  *-----------------------------------------------------------------------------
  *
  * vmxnet3_shm_pool_create --
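
The allocator that vmxnet3_shm_init_allocator (re)fills above is a plain LIFO stack of page indices: each zeroed data page's index is pushed onto allocator.stack, and index 0 is reserved as SHM_INVALID_IDX, which is why the fill loop starts at i = 1. A standalone sketch of the scheme; the struct layout and sizes below are inferred from the calls in this diff, not copied from the driver:

#include <linux/types.h>

/* Illustrative LIFO index allocator mirroring the fill loop above; the
 * real state lives inside struct vmxnet3_shm_pool, and the stack size
 * here is a placeholder. */
#define DEMO_INVALID_IDX   0            /* index 0 is reserved as invalid */
#define DEMO_POOL_PAGES    4096

struct demo_allocator {
   unsigned int count;                  /* free entries on the stack */
   u16 stack[DEMO_POOL_PAGES];          /* free page indices, top at count-1 */
};

static u16 demo_alloc_idx(struct demo_allocator *a)
{
   if (a->count == 0) {
      return DEMO_INVALID_IDX;          /* pool exhausted */
   }
   return a->stack[--a->count];         /* pop */
}

static void demo_free_idx(struct demo_allocator *a, u16 idx)
{
   a->stack[a->count++] = idx;          /* push */
}

A LIFO free list keeps both operations O(1) and tends to hand back recently freed, cache-warm pages first.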
@@ -367,29 +467,15 @@
    // Allocate data pages
    shm->data.num_pages = SHM_DATA_SIZE;
    for (i = 1; i < shm->data.num_pages; i++) {
-#ifdef __GFP_ZERO
-      struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-#else
       struct page *page = alloc_page(GFP_KERNEL);
-#endif
       if (page == NULL) {
          goto fail_data;
       }
 
-#ifndef __GFP_ZERO
-      {
-         void *virt = kmap(page);
-         memset(virt, 0, PAGE_SIZE);
-         kunmap(virt);
-      }
-#endif
-
       VMXNET3_SHM_SET_IDX2PAGE(shm, i, page);
-      shm->allocator.stack[shm->allocator.count++] = i;
 
       VMXNET3_ASSERT(i != SHM_INVALID_IDX);
    }
-   VMXNET3_ASSERT(shm->allocator.count <= SHM_DATA_SIZE);
 
    // Allocate control page
    ctl_page = alloc_page(GFP_KERNEL);

@@ -397,10 +483,13 @@
       goto fail_ctl;
    }
    ctl_ptr = (void*)kmap(ctl_page);
-   memset(ctl_ptr, 0, PAGE_SIZE);
    shm->ctl.pages[0] = ctl_page;
    shm->ctl.ptr = ctl_ptr;
 
+   // Reset data and control pages
+   vmxnet3_shm_init_allocator(shm);
+   memset(shm->ctl.ptr, 0, PAGE_SIZE);
+
    // Register char device
    shm->misc_dev.minor = MISC_DYNAMIC_MINOR;
    shm->misc_dev.name = shm->name;
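
Two related cleanups land in the allocation path above: the #ifdef __GFP_ZERO branches disappear because zeroing and the free-stack fill now happen in the shared vmxnet3_shm_init_allocator step, and the removed fallback had passed the kmap'ed virtual address to kunmap, which expects the struct page, so this is quietly a bug fix as well. For reference, a sketch of the two equivalent ways to obtain a zeroed page (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper showing both zeroing strategies; the driver now
 * allocates plain pages and zeroes them later in the init step. */
static struct page *alloc_zeroed_data_page(void)
{
#ifdef __GFP_ZERO
   return alloc_page(GFP_KERNEL | __GFP_ZERO);   /* zeroed by the allocator */
#else
   struct page *page = alloc_page(GFP_KERNEL);
   if (page != NULL) {
      void *virt = kmap(page);                   /* map (possibly highmem) page */
      memset(virt, 0, PAGE_SIZE);
      kunmap(page);                              /* kunmap takes the page, not virt */
   }
   return page;
#endif
}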
@@ -480,7 +569,6 @@
 
    // Free data pages
    for (i = 1; i < SHM_DATA_SIZE; i++) {
-      kunmap(VMXNET3_SHM_IDX2PAGE(shm,i));
       __free_page(VMXNET3_SHM_IDX2PAGE(shm, i));
    }
 
@@ -774,7 +862,13 @@
    struct vmxnet3_shm_ringentry *re;
 
    // consume TX queue
-   vmxnet3_shm_consume_user_tx_queue(shm);
+   if (vmxnet3_shm_consume_user_tx_queue(shm) == -1) {
+      // the device has been closed, let the user space
+      // know there is activity, so that it gets a chance
+      // to read the channelBad flag.
+      mask |= POLLIN;
+      return mask;
+   }
 
    // Wait on the rxq for an interrupt to wake us
    poll_wait(filp, &shm->rxq, wait);
@@ -808,9 +902,8 @@
  *-----------------------------------------------------------------------------
  */
 
-static int
-vmxnet3_shm_chardev_ioctl(struct inode *inode,
-                          struct file *filp,
+static long
+vmxnet3_shm_chardev_ioctl(struct file *filp,
                           unsigned int cmd,
                           unsigned long arg)
 {
@@ -827,11 +920,7 @@
 
       case SHM_IOCTL_ALLOC_ONE:
          idx = vmxnet3_shm_alloc_page(shm);
-         if (idx != SHM_INVALID_IDX) {
-            return idx;
-         } else {
-            return -ENOMEM;
-         }
+         return idx;
 
       case SHM_IOCTL_ALLOC_MANY:
          for (i = 0; i < arg; i++) {

@@ -839,10 +928,10 @@
             if (idx != SHM_INVALID_IDX) {
                if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
                   vmxnet3_shm_free_page(shm, idx);
-                  return -ENOMEM;
+                  return SHM_INVALID_IDX;
                }
             } else {
-               return -ENOMEM;
+               return SHM_INVALID_IDX;
             }
          }
         return 0;

@@ -850,7 +939,7 @@
       case SHM_IOCTL_ALLOC_ONE_AND_MANY:
          idx1 = vmxnet3_shm_alloc_page(shm);
         if (idx1 == SHM_INVALID_IDX) {
-            return -ENOMEM;
+            return SHM_INVALID_IDX;
         }
         for (i = 0; i < arg - 1; i++) {
            idx = vmxnet3_shm_alloc_page(shm);

@@ -858,11 +947,11 @@
               if (vmxnet3_shm_user_rx(shm, idx, 0, 1, 1)) {
                  vmxnet3_shm_free_page(shm, idx);
                  vmxnet3_shm_free_page(shm, idx1);
-                  return -ENOMEM;
+                  return SHM_INVALID_IDX;
               }
            } else {
               vmxnet3_shm_free_page(shm, idx1);
-               return -ENOMEM;
+               return SHM_INVALID_IDX;
            }
         }
         return idx1;

@@ -877,6 +966,15 @@
    return -ENOTTY;
 }
 
+#ifndef HAVE_UNLOCKED_IOCTL
+static int vmxnet3_shm_chardev_old_ioctl(struct inode *inode,
+                                         struct file *filp,
+                                         unsigned int cmd,
+                                         unsigned long arg)
+{
+   return vmxnet3_shm_chardev_ioctl(filp, cmd, arg);
+}
+#endif
 
 /*
  *-----------------------------------------------------------------------------
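
A convention change runs through this switch: allocation failures now return SHM_INVALID_IDX instead of -ENOMEM, so the long return value of the unlocked ioctl can carry a valid page index, zero for success, or the reserved invalid index, without overloading negative errno values. A hypothetical userspace caller might look like the following; the header name is a guess, and only SHM_IOCTL_ALLOC_ONE and SHM_INVALID_IDX are taken from the diff:

/* Hypothetical userspace caller; "vmxnet3_shm_user.h" is an assumed
 * header exporting SHM_IOCTL_ALLOC_ONE and SHM_INVALID_IDX. */
#include <stdio.h>
#include <sys/ioctl.h>
#include "vmxnet3_shm_user.h"

static int alloc_one_page(int fd)
{
   long idx = ioctl(fd, SHM_IOCTL_ALLOC_ONE, 0);

   if (idx < 0) {                   /* the ioctl itself failed, errno is set */
      perror("ioctl");
      return -1;
   }
   if (idx == SHM_INVALID_IDX) {    /* pool exhausted (was -ENOMEM before) */
      fprintf(stderr, "shm pool exhausted\n");
      return -1;
   }
   return (int)idx;                 /* valid shared-memory page index */
}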
@@ -974,10 +1072,16 @@
                             struct file * filp)
 {
    struct vmxnet3_shm_pool *shm = filp->private_data;
+
+   if (shm->adapter) {
+      vmxnet3_shm_pool_reset(shm);
+   } else {
+      vmxnet3_shm_init_allocator(shm);
+      memset(shm->ctl.ptr, 0, PAGE_SIZE);
+   }
+
    kobject_put(&shm->kobj);
 
-   // XXX: I guess we should reset the control pages here
-
    return 0;
 }
 
@@ -1051,6 +1155,8 @@
  *
  *    Send a packet (collection of ring entries) using h/w tx routine.
  *
+ *    Protected by shm.tx_lock
+ *
  * Results:
  *    0 on success. Negative value to indicate error
  *
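
The new comment line documents a locking invariant rather than a code change: vmxnet3_shm_tx_pkt must be entered with the pool's tx_lock held, the same spinlock released with spin_unlock_irqrestore in the detach hunk at the end of this diff. An illustrative call site under that invariant; the helper name and call-site shape are invented, not copied from the driver:

/* Hypothetical call site honoring the documented invariant. */
static int shm_tx_flush(struct vmxnet3_shm_pool *shm)
{
   unsigned long flags;
   int status;

   spin_lock_irqsave(&shm->tx_lock, flags);      /* serialize shm TX state */
   status = vmxnet3_shm_tx_pkt(shm->adapter,
                               shm->partial_tx.res,
                               shm->partial_tx.frags);
   spin_unlock_irqrestore(&shm->tx_lock, flags);
   return status;
}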
@@ -1070,6 +1176,9 @@
 
    skb = dev_alloc_skb(100);
    if (skb == NULL) {
+      for (i = 0; i < frags; i++) {
+         vmxnet3_shm_free_page(adapter->shm, res[i].idx);
+      }
       VMXNET3_ASSERT(FALSE);
       return -ENOMEM;
    }
@@ -1092,15 +1201,11 @@
    {
       struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
       int ret;
-      skb->protocol = cpu_to_be16(0x86dd);
+      skb->protocol = htons(ETH_P_IPV6);
       adapter->shm->ctl.ptr->stats.kernel_tx += frags; // XXX: move to better place
-      ret = vmxnet3_tq_xmit(skb, tq, adapter, adapter->netdev);
+      ret = vmxnet3_shm_tq_xmit(skb, tq, adapter, adapter->netdev);
       if (ret == COMPAT_NETDEV_TX_BUSY) {
-         for (i = 0; i < frags; i++) {
-            vmxnet3_shm_free_page(adapter->shm, res[i].idx);
-         }
-         skb_shinfo(skb)->nr_frags = 0;
-         kfree_skb(skb);
+         vmxnet3_dev_kfree_skb(adapter, skb);
       }
 
       return ret;

@@ -1109,6 +1214,38 @@
    return 0;
 }
 
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * vmxnet3_shm_tq_xmit --
+ *
+ *    Wrap vmxnet3_tq_xmit holding the netdev tx lock to better emulate the
+ *    Linux stack. Also check for a stopped tx queue to avoid racing with
+ *    vmxnet3_close.
+ *
+ * Results:
+ *    Same as vmxnet3_tq_xmit.
+ *
+ * Side effects:
+ *    None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+int
+vmxnet3_shm_tq_xmit(struct sk_buff *skb,
+                    struct vmxnet3_tx_queue *tq,
+                    struct vmxnet3_adapter *adapter,
+                    struct net_device *netdev)
+{
+   int ret = COMPAT_NETDEV_TX_BUSY;
+   compat_netif_tx_lock(netdev);
+   if (!netif_queue_stopped(netdev)) {
+      ret = vmxnet3_tq_xmit(skb, tq, adapter, netdev);
+   }
+   compat_netif_tx_unlock(netdev);
+   return ret;
+}
+
 
 /*
  *-----------------------------------------------------------------------------
@@ -1131,8 +1268,6 @@
 vmxnet3_shm_tx_re(struct vmxnet3_shm_pool *shm,
                   struct vmxnet3_shm_ringentry re)
 {
-   int i;
-
    shm->partial_tx.res[shm->partial_tx.frags++] = re;
 
    if (re.eop) {

@@ -1140,9 +1275,7 @@
                                      shm->partial_tx.res,
                                      shm->partial_tx.frags);
       if (status < 0) {
-         for (i = 0; i < shm->partial_tx.frags; i++) {
-            vmxnet3_shm_free_page(shm, shm->partial_tx.res[i].idx);
-         }
+         VMXNET3_LOG("vmxnet3_shm_tx_pkt failed %d\n", status);
       }
       shm->partial_tx.frags = 0;
       return 1;
@@ -1414,6 +1547,12 @@
    adapter->shm->adapter = NULL;
    spin_unlock_irqrestore(&adapter->shm->tx_lock, flags);
 
+   // Mark the channel as 'in bad state'
+   adapter->shm->ctl.ptr->channelBad = 1;
+
    kobject_put(&adapter->shm->kobj);
+
+   wake_up(&adapter->shm->rxq);
+
    return 0;
 }
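
This detach path pairs with the poll hunk earlier in the diff: the driver marks the channel bad, drops its pool reference, and wakes any sleeper on rxq; poll then reports POLLIN so the reader observes channelBad instead of blocking forever. A hypothetical sketch of the userspace side, assuming the mmap'ed control page exposes the channelBad flag; the struct and function names below are invented:

#include <poll.h>

/* Invented minimal view of the control page; the real layout lives in the
 * driver's shared header. Only channelBad is used here. */
struct vmxnet3_shm_ctl_view {
   volatile unsigned int channelBad;
   /* ... rings, stats, etc. ... */
};

static int wait_for_rx(int fd, const struct vmxnet3_shm_ctl_view *ctl)
{
   struct pollfd pfd = { .fd = fd, .events = POLLIN };

   if (poll(&pfd, 1, -1) < 0) {
      return -1;    /* poll failure (EINTR etc.) */
   }
   if (ctl->channelBad) {
      /* Driver closed the channel (device reset or module unload). */
      return -1;
   }
   return 0;        /* real RX activity: consume the shared rings */
}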