1
/*********************************************************
2
* Copyright (C) 2007 VMware, Inc. All rights reserved.
4
* This program is free software; you can redistribute it and/or modify it
5
* under the terms of the GNU General Public License as published by the
6
* Free Software Foundation version 2 and no later version.
8
* This program is distributed in the hope that it will be useful, but
9
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
10
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* You should have received a copy of the GNU General Public License along
14
* with this program; if not, write to the Free Software Foundation, Inc.,
15
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17
*********************************************************/
19
#ifndef _PUBLIC_VMCI_QUEUE_PAIR_H_
20
#define _PUBLIC_VMCI_QUEUE_PAIR_H_
24
* vmci_queue_pair.h --
26
* Defines queue layout in memory, and helper functions to enqueue and
27
* dequeue items. XXX needs checksumming?
30
#define INCLUDE_ALLOW_MODULE
31
#define INCLUDE_ALLOW_VMX
32
#include "includeCheck.h"
34
#if defined(__APPLE__) && defined(KERNEL)
35
# include "vmci_kernel_if.h"
38
#include "vm_basic_defs.h"
39
#include "vm_basic_types.h"
40
#include "vm_atomic.h"
41
#include "vmci_defs.h"
42
#include "vm_assert.h"
43
#if defined(SOLARIS) || defined(__APPLE__)
47
#if (defined(__linux__) && defined(__KERNEL__)) || defined(SOLARIS)
48
#include "vmci_kernel_if.h"
54
class IOMemoryDescriptor;
57
typedef struct IOMemoryDescriptor IOMemoryDescriptor;
58
typedef struct IOMemoryMap IOMemoryMap;
62
#if defined(__linux__) && defined(__KERNEL__)
70
* A Queue cannot stand by itself as designed. Each Queue's header
71
* contains a pointer into itself (the producerTail) and into its peer
72
* (consumerHead). The reason for the separation is one of
73
* accessibility: Each end-point can modify two things: where the next
74
* location to enqueue is within its produceQ (producerTail); and
75
* where the next location in its consumeQ (i.e., its peer's produceQ)
76
* it can dequeue (consumerHead). The end-point cannot modify the
77
* pointers of its peer (guest to guest; NOTE that in the host both
78
* queue headers are mapped r/w). But, each end-point needs access to
79
* both Queue header structures in order to determine how much space
80
* is used (or left) in the Queue. This is because for an end-point
81
* to know how full its produceQ is, it needs to use the consumerHead
82
* that points into the produceQ but -that- consumerHead is in the
83
* Queue header for that end-points consumeQ.
85
* Thoroughly confused? Sorry.
87
* producerTail: the point to enqueue new entrants. When you approach
88
* a line in a store, for example, you walk up to the tail.
90
* consumerHead: the point in the queue from which the next element is
91
* dequeued. In other words, who is next in line is he who is at the
94
* Also, producerTail points to an empty byte in the Queue, whereas
95
* consumerHead points to a valid byte of data (unless producerTail ==
96
* consumerHead in which case consumerHead does not point to a valid
99
* For a queue of buffer 'size' bytes, the tail and head pointers will be in
100
* the range [0, size-1].
102
* If produceQ->producerTail == consumeQ->consumerHead then the
106
typedef struct VMCIQueueHeader {
107
/* All fields are 64bit and aligned. */
108
VMCIHandle handle; /* Identifier. */
109
Atomic_uint64 producerTail; /* Offset in this queue. */
110
Atomic_uint64 consumerHead; /* Offset in peer queue. */
115
* If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
116
* size to be less than 4GB, and use 32bit atomic operations on the head and
117
* tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which
118
* is an atomic read-modify-write. This will cause traces to fire when a 32bit
119
* consumer tries to read the producer's tail pointer, for example, because the
120
* consumer has read-only access to the producer's tail pointer.
122
* We provide the following macros to invoke 32bit or 64bit atomic operations
123
* based on the architecture the code is being compiled on.
126
/* Architecture independent maximum queue size. */
127
#define QP_MAX_QUEUE_SIZE_ARCH_ANY CONST64U(0xffffffff)
130
# define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffffffffffff)
131
# define QPAtomic_ReadOffset(x) Atomic_Read64(x)
132
# define QPAtomic_WriteOffset(x, y) Atomic_Write64(x, y)
135
* Wrappers below are being used to call Atomic_Read32 because of the
136
* 'type punned' compilation warning received when Atomic_Read32 is
137
* called with a Atomic_uint64 pointer typecasted to Atomic_uint32
138
* pointer from QPAtomic_ReadOffset. Ditto with QPAtomic_WriteOffset.
142
TypeSafe_Atomic_Read32(void *var) // IN:
144
return Atomic_Read32((Atomic_uint32 *)(var));
148
TypeSafe_Atomic_Write32(void *var, uint32 val) // IN:
150
Atomic_Write32((Atomic_uint32 *)(var), (uint32)(val));
153
# define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffff)
154
# define QPAtomic_ReadOffset(x) TypeSafe_Atomic_Read32((void *)(x))
155
# define QPAtomic_WriteOffset(x, y) \
156
TypeSafe_Atomic_Write32((void *)(x), (uint32)(y))
157
#endif /* __x86_64__ */
161
*-----------------------------------------------------------------------------
165
* Helper to add a given offset to a head or tail pointer. Wraps the value
166
* of the pointer around the max size of the queue.
174
*-----------------------------------------------------------------------------
178
AddPointer(Atomic_uint64 *var, // IN:
182
uint64 newVal = QPAtomic_ReadOffset(var);
184
if (newVal >= size - add) {
189
QPAtomic_WriteOffset(var, newVal);
194
*-----------------------------------------------------------------------------
198
* This data type contains the information about a queue.
200
* There are two queues (hence, queue pairs) per transaction model
201
* between a pair of end points, A & B. One queue is used by end
202
* point A to transmit commands and responses to B. The other queue
203
* is used by B to transmit commands and responses.
205
* There are six distinct definitions of the VMCIQueue structure.
206
* Five are found below along with descriptions of under what build
207
* type the VMCIQueue structure type is compiled. The sixth is
208
* located in a separate file and is used in the VMKernel context.
210
* VMCIQueuePair_QueueIsMapped() is a macro used to determine if the
211
* VMCIQueue struct is backed by memory and thus can be enqueued to or
212
* dequeued from. In host kernels, the macro checks the buffer
213
* pointer for the Queue which is populated in VMCIHost_FinishAttach()
214
* on the various platforms.
216
*-----------------------------------------------------------------------------
219
#if defined(VMX86_TOOLS) || defined(VMX86_VMX)
220
# define VMCIQueuePair_QueueIsMapped(q) (TRUE)
221
# if defined(__linux__) && defined(__KERNEL__)
223
* Linux Kernel Guest.
225
* The VMCIQueue is two or more machine pages where the first
226
* contains the queue header and the second and subsequent pages
227
* contain an array of struct page pointers to the actual pages
228
* that contain the queue data.
230
typedef struct VMCIQueue {
231
VMCIQueueHeader queueHeader;
232
uint8 _padding[PAGE_SIZE - sizeof(VMCIQueueHeader)];
233
struct page *page[0];
235
# elif defined(SOLARIS)
237
* Solaris Kernel Guest.
239
* kmem_cache_alloc with object size of one page is being used to get
240
* page aligned memory for queueHeader. VMCIQueue stores a pointer to
241
* the queueHeader and the array of virtual addresses of actual pages
242
* that contain the queue data.
244
typedef struct VMCIQueue {
245
VMCIQueueHeader *queueHeaderPtr;
251
* VMX App, Windows Kernel Guest, and Mac Kernel Guest
253
* The VMCIQueue is two or more machine pages where the first
254
* contains the queue header and the second and subsequent pages
255
* contain the actual queue buffer data.
257
typedef struct VMCIQueue {
258
VMCIQueueHeader queueHeader;
259
uint8 _padding[PAGE_SIZE - sizeof(VMCIQueueHeader)];
260
uint8 buffer[0]; // VMX doesn't use this today
265
* VMCIQueuePair_QueueIsMapped()
267
* This macro will return TRUE if a queue created by either the
268
* guest or host has had the memory for the queue pair mapped by the
269
* VMX and made avaiable to the host by way of the SetPageFile
272
* After SetPageFile, the queue memory will remain available to the
273
* host until the host detaches. On Linux and Mac OS platforms the
274
* memory for the queue pair remains available to the host driver
275
* after the VMX process for the guest goes away. On Windows, the
276
* memory is copied (under the protection of a mutex) so that the
277
* host can continue to operate on the queues after the VMX is gone.
279
* The version of the VMCIQueue data structure which is provided by
280
* the VMCI kernel module is protected by the VMCI
281
* QueuePairList_Lock and the VMCI QueuePairList_FindEntry()
282
* function. Therefore, host-side clients shouldn't be able to
283
* access the structure after it's gone. And, the memory the
284
* queueHeaderPtr points to is torn down (and freed) after the entry
285
* has been removed from the QueuePairList and just before the entry
286
* itself is deallocated.
289
# define VMCIQueuePair_QueueIsMapped(q) ((q)->queueHeaderPtr != NULL)
291
# if defined(__linux__) && defined(__KERNEL__)
295
* The VMCIQueueHeader has been created elsewhere and this
296
* structure (VMCIQueue) needs a pointer to that
299
* Also, the queue contents are managed by an array of struct
300
* pages which are managed elsewhere. So, this structure simply
301
* contains the address of that array of struct pages.
303
typedef struct VMCIQueue {
304
VMCIQueueHeader *queueHeaderPtr;
307
# elif defined __APPLE__
311
* The VMCIQueueHeader has been created elsewhere and this
312
* structure (VMCIQueue) needs a pointer to that
315
* Also, the queue contents are managed dynamically by
316
* mapping/unmapping the pages. All we need is the VA64
317
* base address of the buffer and the task_t of the VMX
318
* process hosting the queue file (i.e. buffer).
320
* Note, too, that the buffer contains one page of queue
321
* header information (head and tail pointers). But, that
322
* for the life of the VMCIQueue structure the
323
* buffer value contains the address past the header info.
324
* This modification happens during FinishAttach and is
327
typedef struct VMCIQueue {
328
VMCIQueueHeader *queueHeaderPtr;
329
IOMemoryDescriptor *pages;
336
* The VMCIQueueHeader has been created elsewhere and this
337
* structure (VMCIQueue) needs a pointer to that
340
* Also, the queue contents are managed by an array of bytes
341
* which are managed elsewhere. So, this structure simply
342
* contains the address of that array of bytes.
344
* We have a mutex to protect the queue from accesses within the
345
* kernel module. NOTE: the guest may be enqueuing or dequeuing
346
* while the host has the mutex locked. However, multiple
347
* kernel processes will not access the Queue simultaneously.
349
* What the mutex is trying to protect is the moment where the
350
* guest detaches from a queue. In that moment, we will
351
* allocate memory to hold the guest's produceQ and we will
352
* change the queueHeaderPtr to point to a newly allocated (in
353
* kernel memory) structure. And, the buffer pointer will be
354
* changed to point to an in-kernel queue content (even if there
355
* isn't any data in the queue for the host to consume).
357
* Note that a VMCIQueue allocated by the host passes through
358
* three states before it's torn down. First, the queue is
359
* created by the host but doesn't point to any memory and
360
* therefore can't absorb any enqueue requests. (NOTE: On
361
* Windows this could be solved because of the mutexes, as
362
* explained above [only in reverse, if you will]. But unless
363
* we added the same mutexes [or other rules about accessing the
364
* queues] we can't can't solve this generally on other
365
* operating systems.)
367
* When the guest calls SetPageStore(), then the queue is backed
368
* by valid memory and the host can enqueue.
370
* When the guest detaches, enqueues are absorbed by /dev/null,
373
typedef struct VMCIQueue {
374
VMCIQueueHeader *queueHeaderPtr;
376
Bool enqueueToDevNull;
377
FAST_MUTEX *mutex; /* Access the mutex through this */
378
FAST_MUTEX __mutex; /* Don't touch except to init */
380
#define VMCIQueuePair_EnqueueToDevNull(q) ((q)->enqueueToDevNull)
384
#ifndef VMCIQueuePair_EnqueueToDevNull
385
#define VMCIQueuePair_EnqueueToDevNull(q) (FALSE)
386
#endif /* default VMCIQueuePair_EnqueueToDevNull() definition */
389
*-----------------------------------------------------------------------------
391
* VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
392
* types are passed around to enqueue and dequeue routines. Note that
393
* often the functions passed are simply wrappers around memcpy
396
*-----------------------------------------------------------------------------
398
typedef int VMCIMemcpyToQueueFunc(VMCIQueue *queue, uint64 queueOffset,
399
const void *src, size_t srcOffset,
401
typedef int VMCIMemcpyFromQueueFunc(void *dest, size_t destOffset,
402
const VMCIQueue *queue, uint64 queueOffset,
406
* NOTE: On Windows host we have special code to access the queue
407
* contents (and QueuePair header) so that we can protect accesses
408
* during tear down of the guest that owns the mappings of the
409
* QueuePair queue contents. See
410
* bora/modules/vmcrosstalk/windows/vmciHostQueuePair.c
413
#if !defined _WIN32 || defined VMX86_TOOLS || defined VMX86_VMX
416
*-----------------------------------------------------------------------------
418
* VMCIQueue_GetHeader() --
420
* Helper routine to get the address of the VMCIQueueHeader.
421
* Note that this routine is useful because there are two ways
422
* queues are declared (at least as far as the queue header is
423
* concerned, anyway). In one way, the queue contains the header
424
* in the other the queue contains a pointer to the header. This
425
* helper hides that ambiguity from users of queues who need
426
* access to the header.
429
* The address of the queue header.
434
*-----------------------------------------------------------------------------
437
/*
 * Returns the address of the VMCIQueueHeader for a queue, hiding the
 * per-platform difference between queues that embed the header and
 * queues that hold a pointer to it.  The queue must be mapped.
 */
static INLINE VMCIQueueHeader *
VMCIQueue_GetHeader(const VMCIQueue *q)
{
   ASSERT_NOT_IMPLEMENTED(VMCIQueuePair_QueueIsMapped(q));
#if defined(VMX86_TOOLS) || defined(VMX86_VMX)
   /*
    * All guest variants except Solaris have a complete queue header
    * embedded in the VMCIQueue structure.
    */
#if defined (SOLARIS)
   return (VMCIQueueHeader *)q->queueHeaderPtr;
#else
   return (VMCIQueueHeader *)&q->queueHeader;
#endif
#else
   /*
    * All host variants have a pointer to the queue header.
    */
   return (VMCIQueueHeader *)q->queueHeaderPtr;
#endif
}
460
*-----------------------------------------------------------------------------
462
* VMCIQueue_ProducerTail() --
464
* Helper routine to get the Producer Tail from the supplied queue.
467
* The contents of the queue's producer tail.
472
*-----------------------------------------------------------------------------
476
VMCIQueue_ProducerTail(const VMCIQueue *queue)
478
return QPAtomic_ReadOffset(&VMCIQueue_GetHeader(queue)->producerTail);
482
*-----------------------------------------------------------------------------
484
* VMCIQueue_ConsumerHead() --
486
* Helper routine to get the Consumer Head from the supplied queue.
489
* The contents of the queue's consumer tail.
494
*-----------------------------------------------------------------------------
498
VMCIQueue_ConsumerHead(const VMCIQueue *queue)
500
return QPAtomic_ReadOffset(&VMCIQueue_GetHeader(queue)->consumerHead);
504
*-----------------------------------------------------------------------------
506
* VMCIQueue_AddProducerTail() --
508
* Helper routine to increment the Producer Tail. Fundamentally,
509
* AddPointer() is used to manipulate the tail itself.
517
*-----------------------------------------------------------------------------
521
VMCIQueue_AddProducerTail(VMCIQueue *queue,
525
AddPointer(&VMCIQueue_GetHeader(queue)->producerTail, add, queueSize);
529
*-----------------------------------------------------------------------------
531
* VMCIQueue_AddConsumerHead() --
533
* Helper routine to increment the Consumer Head. Fundamentally,
534
* AddPointer() is used to manipulate the head itself.
542
*-----------------------------------------------------------------------------
546
VMCIQueue_AddConsumerHead(VMCIQueue *queue,
550
AddPointer(&VMCIQueue_GetHeader(queue)->consumerHead, add, queueSize);
555
*-----------------------------------------------------------------------------
557
* VMCIMemcpy{To,From}Queue[v]() prototypes (and, in some cases, an
560
* Note that these routines are NOT SAFE to call on a host end-point
561
* until the guest end of the queue pair has attached -AND-
562
* SetPageStore(). The VMX crosstalk device will issue the
563
* SetPageStore() on behalf of the guest when the guest creates a
564
* QueuePair or attaches to one created by the host. So, if the guest
565
* notifies the host that it's attached then the queue is safe to use.
566
* Also, if the host registers notification of the connection of the
567
* guest, then it will only receive that notification when the guest
568
* has issued the SetPageStore() call and not before (when the guest
571
*-----------------------------------------------------------------------------
574
#if defined (SOLARIS) || (defined(__APPLE__) && !defined (VMX86_TOOLS)) || \
575
(defined(__linux__) && defined(__KERNEL__))
576
int VMCIMemcpyToQueue(VMCIQueue *queue, uint64 queueOffset, const void *src,
577
size_t srcOffset, size_t size);
578
int VMCIMemcpyFromQueue(void *dest, size_t destOffset, const VMCIQueue *queue,
579
uint64 queueOffset, size_t size);
580
int VMCIMemcpyToQueueV(VMCIQueue *queue, uint64 queueOffset, const void *src,
581
size_t srcOffset, size_t size);
582
int VMCIMemcpyFromQueueV(void *dest, size_t destOffset, const VMCIQueue *queue,
583
uint64 queueOffset, size_t size);
584
#elif defined(_WIN32) && defined(WINNT_DDK)
585
int VMCIMemcpyToQueue(VMCIQueue *queue, uint64 queueOffset, const void *src,
586
size_t srcOffset, size_t size);
587
int VMCIMemcpyFromQueue(void *dest, size_t destOffset, const VMCIQueue *queue,
588
uint64 queueOffset, size_t size);
593
*-----------------------------------------------------------------------------
595
* VMCIMemcpyToQueue --
597
* Wrapper for memcpy --- copies from a given buffer to a VMCI Queue.
598
* Assumes that offset + size does not wrap around in the queue.
601
* Zero on success, negative error code on failure.
606
*-----------------------------------------------------------------------------
610
VMCIMemcpyToQueue(VMCIQueue *queue, // OUT:
611
uint64 queueOffset, // IN:
612
const void *src, // IN:
613
size_t srcOffset, // IN:
616
ASSERT_NOT_IMPLEMENTED(VMCIQueuePair_QueueIsMapped(queue));
617
memcpy(queue->buffer + queueOffset, (uint8 *)src + srcOffset, size);
623
*-----------------------------------------------------------------------------
625
* VMCIMemcpyFromQueue --
627
* Wrapper for memcpy --- copies to a given buffer from a VMCI Queue.
628
* Assumes that offset + size does not wrap around in the queue.
631
* Zero on success, negative error code on failure.
636
*-----------------------------------------------------------------------------
640
VMCIMemcpyFromQueue(void *dest, // OUT:
641
size_t destOffset, // IN:
642
const VMCIQueue *queue, // IN:
643
uint64 queueOffset, // IN:
646
ASSERT_NOT_IMPLEMENTED(VMCIQueuePair_QueueIsMapped(queue));
647
memcpy((uint8 *)dest + destOffset, queue->buffer + queueOffset, size);
650
#endif /* __linux__ && __KERNEL__ */
654
*-----------------------------------------------------------------------------
656
* VMCIQueue_CheckAlignment --
658
* Checks if the given queue is aligned to page boundary. Returns TRUE if
659
* the alignment is good.
667
*-----------------------------------------------------------------------------
671
VMCIQueue_CheckAlignment(const VMCIQueue *queue) // IN:
674
* Checks if header of the queue is page aligned.
676
return ((uintptr_t)VMCIQueue_GetHeader(queue) & (PAGE_SIZE - 1)) == 0;
681
*-----------------------------------------------------------------------------
683
* VMCIQueue_GetPointers --
685
* Helper routine for getting the head and the tail pointer for a queue.
686
* Both the VMCIQueues are needed to get both the pointers for one queue.
694
*-----------------------------------------------------------------------------
698
VMCIQueue_GetPointers(const VMCIQueue *produceQ,
699
const VMCIQueue *consumeQ,
700
uint64 *producerTail,
701
uint64 *consumerHead)
703
*producerTail = VMCIQueue_ProducerTail(produceQ);
704
*consumerHead = VMCIQueue_ConsumerHead(consumeQ);
709
*-----------------------------------------------------------------------------
711
* VMCIQueue_ResetPointers --
713
* Reset the tail pointer (of "this" queue) and the head pointer (of
722
*-----------------------------------------------------------------------------
726
VMCIQueue_ResetPointers(VMCIQueue *queue) // IN:
728
QPAtomic_WriteOffset(&VMCIQueue_GetHeader(queue)->producerTail, CONST64U(0));
729
QPAtomic_WriteOffset(&VMCIQueue_GetHeader(queue)->consumerHead, CONST64U(0));
734
*-----------------------------------------------------------------------------
738
* Initializes a queue's state (head & tail pointers).
746
*-----------------------------------------------------------------------------
750
VMCIQueue_Init(const VMCIHandle handle, // IN:
751
VMCIQueue *queue) // IN:
753
if (!VMCIQueuePair_QueueIsMapped(queue)) {
755
* In this case, the other end (the guest) hasn't connected yet.
756
* When the guest does connect, the queue pointers will be
762
ASSERT_NOT_IMPLEMENTED(VMCIQueue_CheckAlignment(queue));
763
VMCIQueue_GetHeader(queue)->handle = handle;
764
VMCIQueue_ResetPointers(queue);
769
*-----------------------------------------------------------------------------
771
* VMCIQueueFreeSpaceInt --
773
* Finds available free space in a produce queue to enqueue more
774
* data or reports an error if queue pair corruption is detected.
777
* Free space size in bytes.
782
*-----------------------------------------------------------------------------
786
VMCIQueueFreeSpaceInt(const VMCIQueue *produceQueue, // IN:
787
const VMCIQueue *consumeQueue, // IN:
788
const uint64 produceQSize, // IN:
789
uint64 *freeSpace) // OUT:
796
if (UNLIKELY(!VMCIQueuePair_QueueIsMapped(produceQueue) &&
797
!VMCIQueuePair_QueueIsMapped(consumeQueue))) {
798
return VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
801
tail = VMCIQueue_ProducerTail(produceQueue);
802
head = VMCIQueue_ConsumerHead(consumeQueue);
804
if (tail >= produceQSize || head >= produceQSize) {
805
return VMCI_ERROR_INVALID_SIZE;
809
* Deduct 1 to avoid tail becoming equal to head which causes ambiguity. If
810
* head and tail are equal it means that the queue is empty.
813
*freeSpace = produceQSize - (tail - head) - 1;
815
*freeSpace = head - tail - 1;
823
*-----------------------------------------------------------------------------
825
* VMCIQueue_FreeSpace --
827
* Finds available free space in a produce queue to enqueue more data.
830
* On success, free space size in bytes (up to MAX_INT64).
831
* On failure, appropriate error code.
836
*-----------------------------------------------------------------------------
840
VMCIQueue_FreeSpace(const VMCIQueue *produceQueue, // IN:
841
const VMCIQueue *consumeQueue, // IN:
842
const uint64 produceQSize) // IN:
847
retval = VMCIQueueFreeSpaceInt(produceQueue, consumeQueue, produceQSize,
850
if (retval != VMCI_SUCCESS) {
854
return MIN(freeSpace, MAX_INT64);
859
*-----------------------------------------------------------------------------
861
* VMCIQueue_BufReady --
863
* Finds available data to dequeue from a consume queue.
866
* On success, available data size in bytes (up to MAX_INT64).
867
* On failure, appropriate error code.
872
*-----------------------------------------------------------------------------
876
VMCIQueue_BufReady(const VMCIQueue *consumeQueue, // IN:
877
const VMCIQueue *produceQueue, // IN:
878
const uint64 consumeQSize) // IN:
883
if (UNLIKELY(!VMCIQueuePair_QueueIsMapped(produceQueue) &&
884
!VMCIQueuePair_QueueIsMapped(consumeQueue))) {
885
return VMCI_ERROR_QUEUEPAIR_NODATA;
888
retval = VMCIQueueFreeSpaceInt(consumeQueue, produceQueue,
889
consumeQSize, &freeSpace);
890
if (retval != VMCI_SUCCESS) {
893
uint64 available = consumeQSize - freeSpace - 1;
894
return MIN(available, MAX_INT64);
900
*-----------------------------------------------------------------------------
902
* __VMCIQueue_Enqueue --
904
* Enqueues a given buffer to the produce queue using the provided
905
* function. As many bytes as possible (space available in the queue)
909
* VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue data.
910
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
911
* (as defined by the queue size).
912
* Otherwise, the number of bytes written to the queue is returned.
915
* Updates the tail pointer of the produce queue.
917
*-----------------------------------------------------------------------------
920
static INLINE ssize_t
921
__VMCIQueue_Enqueue(VMCIQueue *produceQueue, // IN:
922
const VMCIQueue *consumeQueue, // IN:
923
const uint64 produceQSize, // IN:
924
const void *buf, // IN:
925
size_t bufSize, // IN:
926
VMCIMemcpyToQueueFunc memcpyToQueue) // IN:
932
if (UNLIKELY(VMCIQueuePair_EnqueueToDevNull(produceQueue))) {
936
if (UNLIKELY(!VMCIQueuePair_QueueIsMapped(produceQueue) &&
937
!VMCIQueuePair_QueueIsMapped(consumeQueue))) {
938
return VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
941
freeSpace = VMCIQueue_FreeSpace(produceQueue, consumeQueue, produceQSize);
943
return VMCI_ERROR_QUEUEPAIR_NOSPACE;
946
return (ssize_t)freeSpace;
949
written = MIN((size_t)freeSpace, bufSize);
950
tail = VMCIQueue_ProducerTail(produceQueue);
951
if (LIKELY(tail + written < produceQSize)) {
952
memcpyToQueue(produceQueue, tail, buf, 0, written);
954
/* Tail pointer wraps around. */
955
const size_t tmp = (size_t)(produceQSize - tail);
957
memcpyToQueue(produceQueue, tail, buf, 0, tmp);
958
memcpyToQueue(produceQueue, 0, buf, tmp, written - tmp);
960
AddPointer(&VMCIQueue_GetHeader(produceQueue)->producerTail, written, produceQSize);
966
*-----------------------------------------------------------------------------
968
* VMCIQueue_Enqueue --
970
* Enqueues a given buffer to the produce queue. As many bytes as possible
971
* (space available in the queue) are enqueued. If bufSize is larger than
972
* the maximum value of ssize_t the result is unspecified.
975
* VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue data.
976
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
977
* (as defined by the queue size).
978
* Otherwise, the number of bytes written to the queue is returned.
981
* Updates the tail pointer of the produce queue.
983
*-----------------------------------------------------------------------------
986
static INLINE ssize_t
987
VMCIQueue_Enqueue(VMCIQueue *produceQueue, // IN:
988
const VMCIQueue *consumeQueue, // IN:
989
const uint64 produceQSize, // IN:
990
const void *buf, // IN:
991
size_t bufSize) // IN:
993
return __VMCIQueue_Enqueue(produceQueue, consumeQueue, produceQSize,
994
buf, bufSize, VMCIMemcpyToQueue);
998
#if defined (SOLARIS) || (defined(__APPLE__) && !defined (VMX86_TOOLS)) || \
999
(defined(__linux__) && defined(__KERNEL__))
1001
*-----------------------------------------------------------------------------
1003
* VMCIQueue_EnqueueV --
1005
* Enqueues a given iovec to the produce queue. As many bytes as possible
1006
* (space available in the queue) are enqueued. If bufSize is larger than
1007
* the maximum value of ssize_t the result is unspecified.
1010
* VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue data.
1011
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
1012
* (as defined by the queue size).
1013
* Otherwise, the number of bytes written to the queue is returned.
1016
* Updates the tail pointer of the produce queue.
1018
*-----------------------------------------------------------------------------
1021
static INLINE ssize_t
1022
VMCIQueue_EnqueueV(VMCIQueue *produceQueue, // IN:
1023
const VMCIQueue *consumeQueue, // IN:
1024
const uint64 produceQSize, // IN:
1025
struct iovec *iov, // IN:
1026
size_t iovSize) // IN:
1028
return __VMCIQueue_Enqueue(produceQueue, consumeQueue, produceQSize,
1029
(void *)iov, iovSize, VMCIMemcpyToQueueV);
1031
#endif /* Systems that support struct iovec */
1035
*-----------------------------------------------------------------------------
1037
* __VMCIQueue_Dequeue --
1039
* Dequeues data (if available) from the given consume queue. Writes data
1040
* to the user provided buffer using the provided function.
1043
* VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
1044
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
1045
* (as defined by the queue size).
1046
* Otherwise the number of bytes dequeued is returned.
1049
* Updates the head pointer of the consume queue.
1051
*-----------------------------------------------------------------------------
1054
static INLINE ssize_t
1055
__VMCIQueue_Dequeue(VMCIQueue *produceQueue, // IN:
1056
const VMCIQueue *consumeQueue, // IN:
1057
const uint64 consumeQSize, // IN:
1059
size_t bufSize, // IN:
1060
VMCIMemcpyFromQueueFunc memcpyFromQueue, // IN:
1061
Bool updateConsumer) // IN:
1067
if (UNLIKELY(!VMCIQueuePair_QueueIsMapped(produceQueue) &&
1068
!VMCIQueuePair_QueueIsMapped(consumeQueue))) {
1069
return VMCI_ERROR_QUEUEPAIR_NODATA;
1072
bufReady = VMCIQueue_BufReady(consumeQueue, produceQueue, consumeQSize);
1074
return VMCI_ERROR_QUEUEPAIR_NODATA;
1077
return (ssize_t)bufReady;
1080
written = MIN((size_t)bufReady, bufSize);
1081
head = VMCIQueue_ConsumerHead(produceQueue);
1082
if (LIKELY(head + written < consumeQSize)) {
1083
memcpyFromQueue(buf, 0, consumeQueue, head, written);
1085
/* Head pointer wraps around. */
1086
const size_t tmp = (size_t)(consumeQSize - head);
1088
memcpyFromQueue(buf, 0, consumeQueue, head, tmp);
1089
memcpyFromQueue(buf, tmp, consumeQueue, 0, written - tmp);
1092
if (updateConsumer) {
1093
AddPointer(&VMCIQueue_GetHeader(produceQueue)->consumerHead, written,
1101
*-----------------------------------------------------------------------------
1103
* VMCIQueue_Dequeue --
1105
* Dequeues data (if available) from the given consume queue. Writes data
1106
* to the user provided buffer. If bufSize is larger than the maximum
1107
* value of ssize_t the result is unspecified.
1110
* VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
1111
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
1112
* (as defined by the queue size).
1113
* Otherwise the number of bytes dequeued is returned.
1116
* Updates the head pointer of the consume queue.
1118
*-----------------------------------------------------------------------------
1121
static INLINE ssize_t
1122
VMCIQueue_Dequeue(VMCIQueue *produceQueue, // IN:
1123
const VMCIQueue *consumeQueue, // IN:
1124
const uint64 consumeQSize, // IN:
1126
size_t bufSize) // IN:
1128
return __VMCIQueue_Dequeue(produceQueue, consumeQueue, consumeQSize,
1129
buf, bufSize, VMCIMemcpyFromQueue, TRUE);
1133
#if defined (SOLARIS) || (defined(__APPLE__) && !defined (VMX86_TOOLS)) || \
1134
(defined(__linux__) && defined(__KERNEL__))
1136
*-----------------------------------------------------------------------------
1138
* VMCIQueue_DequeueV --
1140
* Dequeues data (if available) from the given consume queue. Writes data
1141
* to the user provided iovec. If bufSize is larger than the maximum
1142
* value of ssize_t the result is unspecified.
1145
* VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
1146
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
1147
* (as defined by the queue size).
1148
* Otherwise the number of bytes dequeued is returned.
1151
* Updates the head pointer of the consume queue.
1153
*-----------------------------------------------------------------------------
1156
static INLINE ssize_t
1157
VMCIQueue_DequeueV(VMCIQueue *produceQueue, // IN:
1158
const VMCIQueue *consumeQueue, // IN:
1159
const uint64 consumeQSize, // IN:
1160
struct iovec *iov, // IN:
1161
size_t iovSize) // IN:
1163
return __VMCIQueue_Dequeue(produceQueue, consumeQueue, consumeQSize,
1164
(void *)iov, iovSize, VMCIMemcpyFromQueueV, TRUE);
1166
#endif /* Systems that support struct iovec */
1170
*-----------------------------------------------------------------------------
1174
* Reads data (if available) from the given consume queue. Copies data
1175
* to the provided user buffer but does not update the consumer counter
1179
* VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
1180
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
1181
* (as defined by the queue size).
1182
* Otherwise the number of bytes copied to user buffer is returned.
1187
*-----------------------------------------------------------------------------
1190
static INLINE ssize_t
1191
VMCIQueue_Peek(VMCIQueue *produceQueue, // IN:
1192
const VMCIQueue *consumeQueue, // IN:
1193
const uint64 consumeQSize, // IN:
1195
size_t bufSize) // IN:
1197
return __VMCIQueue_Dequeue(produceQueue, consumeQueue, consumeQSize,
1198
buf, bufSize, VMCIMemcpyFromQueue, FALSE);
1202
#if defined (SOLARIS) || (defined(__APPLE__) && !defined (VMX86_TOOLS)) || \
1203
(defined(__linux__) && defined(__KERNEL__))
1205
*-----------------------------------------------------------------------------
1207
* VMCIQueue_PeekV --
1209
* Reads data (if available) from the given consume queue. Copies data
1210
* to the provided user iovec but does not update the consumer counter
1214
* VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
1215
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
1216
* (as defined by the queue size).
1217
* Otherwise the number of bytes copied to user iovec is returned.
1222
*-----------------------------------------------------------------------------
1225
static INLINE ssize_t
1226
VMCIQueue_PeekV(VMCIQueue *produceQueue, // IN:
1227
const VMCIQueue *consumeQueue, // IN:
1228
const uint64 consumeQSize, // IN:
1229
struct iovec *iov, // IN:
1230
size_t iovSize) // IN:
1232
return __VMCIQueue_Dequeue(produceQueue, consumeQueue, consumeQSize,
1233
(void *)iov, iovSize, VMCIMemcpyFromQueueV, FALSE);
1235
#endif /* Systems that support struct iovec */
1237
#else /* Windows 32 Host defined below */
1239
int VMCIMemcpyToQueue(VMCIQueue *queue, uint64 queueOffset, const void *src,
1240
size_t srcOffset, size_t size);
1241
int VMCIMemcpyFromQueue(void *dest, size_t destOffset, const VMCIQueue *queue,
1242
uint64 queueOffset, size_t size);
1244
void VMCIQueue_Init(const VMCIHandle handle, VMCIQueue *queue);
1245
void VMCIQueue_GetPointers(const VMCIQueue *produceQ,
1246
const VMCIQueue *consumeQ,
1247
uint64 *producerTail,
1248
uint64 *consumerHead);
1249
int64 VMCIQueue_FreeSpace(const VMCIQueue *produceQueue,
1250
const VMCIQueue *consumeQueue,
1251
const uint64 produceQSize);
1252
int64 VMCIQueue_BufReady(const VMCIQueue *consumeQueue,
1253
const VMCIQueue *produceQueue,
1254
const uint64 consumeQSize);
1255
ssize_t VMCIQueue_Enqueue(VMCIQueue *produceQueue,
1256
const VMCIQueue *consumeQueue,
1257
const uint64 produceQSize,
1260
ssize_t VMCIQueue_Dequeue(VMCIQueue *produceQueue,
1261
const VMCIQueue *consumeQueue,
1262
const uint64 consumeQSize,
1265
ssize_t VMCIQueue_Peek(VMCIQueue *produceQueue,
1266
const VMCIQueue *consumeQueue,
1267
const uint64 consumeQSize,
1271
#endif /* defined _WIN32 && !defined VMX86_TOOLS */
1273
#endif /* !_PUBLIC_VMCI_QUEUE_PAIR_H_ */