/*
 * Helpers for getting linearized buffers from iov / filling buffers into iovs
 *
 * Copyright IBM, Corp. 2007, 2008
 * Copyright (C) 2010 Red Hat, Inc.
 *
 * Author(s):
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Amit Shah <amit.shah@redhat.com>
 *  Michael Tokarev <mjt@tls.msk.ru>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/iov.h"

#ifdef _WIN32
# include <windows.h>
# include <winsock2.h>
#else
# include <sys/types.h>
# include <sys/socket.h>
#endif
size_t iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
                    size_t offset, const void *buf, size_t bytes)
{
    size_t done;
    unsigned int i;
    for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
        if (offset < iov[i].iov_len) {
            size_t len = MIN(iov[i].iov_len - offset, bytes - done);
            memcpy(iov[i].iov_base + offset, buf + done, len);
            done += len;
            offset = 0;
        } else {
            /* this element lies entirely before `offset', skip it */
            offset -= iov[i].iov_len;
        }
    }
    assert(offset == 0);
    return done;
}
size_t iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
                  size_t offset, void *buf, size_t bytes)
{
    size_t done;
    unsigned int i;
    for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
        if (offset < iov[i].iov_len) {
            size_t len = MIN(iov[i].iov_len - offset, bytes - done);
            memcpy(buf + done, iov[i].iov_base + offset, len);
            done += len;
            offset = 0;
        } else {
            offset -= iov[i].iov_len;
        }
    }
    assert(offset == 0);
    return done;
}
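/*
 * Illustrative sketch, not part of the original file: linearize the
 * start of a scattered buffer with iov_to_buf(), modify it, and write
 * it back with iov_from_buf().  All names below are hypothetical.
 */
static inline void example_patch_first_byte(const struct iovec *iov,
                                            unsigned int iov_cnt)
{
    uint8_t hdr[16];
    /* copy up to 16 bytes starting at offset 0 into a flat buffer */
    size_t n = iov_to_buf(iov, iov_cnt, 0, hdr, sizeof(hdr));
    if (n > 0) {
        hdr[0] ^= 0x80;                         /* flip a flag bit */
        iov_from_buf(iov, iov_cnt, 0, hdr, n);  /* scatter it back */
    }
}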
size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
                  size_t offset, int fillc, size_t bytes)
{
    size_t done;
    unsigned int i;
    for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
        if (offset < iov[i].iov_len) {
            size_t len = MIN(iov[i].iov_len - offset, bytes - done);
            memset(iov[i].iov_base + offset, fillc, len);
            done += len;
            offset = 0;
        } else {
            offset -= iov[i].iov_len;
        }
    }
    assert(offset == 0);
    return done;
}
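/*
 * Illustrative sketch, not part of the original file: zero the part of
 * a vector that a short read left untouched.  `nread' is hypothetical.
 * Passing -1 for `bytes' means "to the end of the vector", since the
 * fill loop stops at iov_cnt anyway.
 */
static inline void example_zero_tail(const struct iovec *iov,
                                     unsigned int iov_cnt, size_t nread)
{
    iov_memset(iov, iov_cnt, nread, 0, -1);
}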
size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt)
{
    size_t len;
    unsigned int i;

    len = 0;
    for (i = 0; i < iov_cnt; i++) {
        len += iov[i].iov_len;
    }
    return len;
}
/* helper function for iov_send_recv() */
static ssize_t
do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
{
#ifdef CONFIG_POSIX
    ssize_t ret;
    struct msghdr msg;
    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_cnt;
    do {
        ret = do_send
            ? sendmsg(sockfd, &msg, 0)
            : recvmsg(sockfd, &msg, 0);
    } while (ret < 0 && errno == EINTR);
    return ret;
#else
    /* else send piece-by-piece */
    /*XXX Note: windows has WSASend() and WSARecv() */
    unsigned i = 0;
    ssize_t ret = 0;
    while (i < iov_cnt) {
        ssize_t r = do_send
            ? send(sockfd, iov[i].iov_base, iov[i].iov_len, 0)
            : recv(sockfd, iov[i].iov_base, iov[i].iov_len, 0);
        if (r > 0) {
            ret += r;
        } else if (!r) {
            break;
        } else if (errno == EINTR) {
            continue;
        } else {
            /* else it is some "other" error,
             * only return if there was no data processed. */
            if (ret == 0) {
                ret = -1;
            }
            break;
        }
        i++;
    }
    return ret;
#endif
}
ssize_t iov_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt,
                      size_t offset, size_t bytes,
                      bool do_send)
{
    ssize_t total = 0;
    ssize_t ret;
    size_t orig_len, tail;
    unsigned niov;

    while (bytes > 0) {
        /* Find the start position, skipping `offset' bytes:
         * first, skip all full-sized vector elements, */
        for (niov = 0; niov < iov_cnt && offset >= iov[niov].iov_len; ++niov) {
            offset -= iov[niov].iov_len;
        }

        /* niov == iov_cnt would only be valid if bytes == 0, which
         * we already ruled out in the loop condition.  */
        assert(niov < iov_cnt);
        iov += niov;
        iov_cnt -= niov;

        if (offset) {
            /* second, skip `offset' bytes from the (now) first element,
             * undo it on exit */
            iov[0].iov_base += offset;
            iov[0].iov_len -= offset;
        }

        /* Find the end position skipping `bytes' bytes: */
        /* first, skip all full-sized elements */
        tail = bytes;
        for (niov = 0; niov < iov_cnt && iov[niov].iov_len <= tail; ++niov) {
            tail -= iov[niov].iov_len;
        }
        if (tail) {
            /* second, fixup the last element, and remember the original
             * length */
            assert(niov < iov_cnt);
            assert(iov[niov].iov_len > tail);
            orig_len = iov[niov].iov_len;
            iov[niov++].iov_len = tail;
        }

        ret = do_send_recv(sockfd, iov, niov, do_send);

        /* Undo the changes above before checking for errors */
        if (tail) {
            iov[niov-1].iov_len = orig_len;
        }
        if (offset) {
            iov[0].iov_base -= offset;
            iov[0].iov_len += offset;
        }

        if (ret < 0) {
            assert(errno != EINTR);
            if (errno == EAGAIN && total > 0) {
                return total;
            }
            return -1;
        }

        if (ret == 0 && !do_send) {
            /* recv returns 0 when the peer has performed an orderly
             * shutdown. */
            break;
        }

        /* Prepare for the next iteration */
        offset += ret;
        total += ret;
        bytes -= ret;
    }

    return total;
}
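/*
 * Illustrative sketch, not part of the original file: send the first
 * `len' bytes of a vector over a non-blocking socket, resuming after a
 * short write.  `fd' and `len' are hypothetical.
 */
static inline ssize_t example_send_all(int fd, struct iovec *iov,
                                       unsigned iov_cnt, size_t len)
{
    size_t done = 0;
    while (done < len) {
        ssize_t r = iov_send_recv(fd, iov, iov_cnt, done, len - done, true);
        if (r < 0) {
            if (errno == EAGAIN) {
                continue;   /* a real caller would poll() here */
            }
            return -1;
        }
        done += r;
    }
    return done;
}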
void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt,
                 FILE *fp, const char *prefix, size_t limit)
{
    unsigned int v;
    size_t size = 0;
    char *buf;

    for (v = 0; v < iov_cnt; v++) {
        size += iov[v].iov_len;
    }
    size = size > limit ? limit : size;
    buf = g_malloc(size);
    iov_to_buf(iov, iov_cnt, 0, buf, size);
    qemu_hexdump(buf, fp, prefix, size);
    g_free(buf);
}
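/*
 * Usage sketch, not part of the original file: dump at most the first
 * 64 bytes of a vector to stderr, prefixing each line with "virtio".
 */
static inline void example_dump(const struct iovec *iov, unsigned int iov_cnt)
{
    iov_hexdump(iov, iov_cnt, stderr, "virtio", 64);
}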
unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt,
                  const struct iovec *iov, unsigned int iov_cnt,
                  size_t offset, size_t bytes)
{
    size_t len;
    unsigned int i, j;
    for (i = 0, j = 0; i < iov_cnt && j < dst_iov_cnt && bytes; i++) {
        if (offset >= iov[i].iov_len) {
            offset -= iov[i].iov_len;
            continue;
        }
        len = MIN(bytes, iov[i].iov_len - offset);

        dst_iov[j].iov_base = iov[i].iov_base + offset;
        dst_iov[j].iov_len = len;
        j++;
        bytes -= len;
        offset = 0;
    }
    assert(offset == 0);
    return j;
}
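/*
 * Illustrative sketch, not part of the original file: build a
 * sub-vector covering bytes [off, off+len) of `iov' without copying
 * any payload data.  `sub' must have room for up to `iov_cnt' elements.
 */
static inline unsigned example_subvector(struct iovec *sub,
                                         const struct iovec *iov,
                                         unsigned int iov_cnt,
                                         size_t off, size_t len)
{
    /* returns the number of elements actually used in `sub' */
    return iov_copy(sub, iov_cnt, iov, iov_cnt, off, len);
}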
void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint)
{
    qiov->iov = g_malloc(alloc_hint * sizeof(struct iovec));
    qiov->niov = 0;
    qiov->nalloc = alloc_hint;
    qiov->size = 0;
}
void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
{
    int i;

    qiov->iov = iov;
    qiov->niov = niov;
    qiov->nalloc = -1;  /* mark the vector as externally allocated */
    qiov->size = 0;
    for (i = 0; i < niov; i++) {
        qiov->size += iov[i].iov_len;
    }
}
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
{
    assert(qiov->nalloc != -1);

    if (qiov->niov == qiov->nalloc) {
        qiov->nalloc = 2 * qiov->nalloc + 1;
        qiov->iov = g_realloc(qiov->iov, qiov->nalloc * sizeof(struct iovec));
    }
    qiov->iov[qiov->niov].iov_base = base;
    qiov->iov[qiov->niov].iov_len = len;
    qiov->size += len;
    ++qiov->niov;
}
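/*
 * Illustrative sketch, not part of the original file: build a
 * two-element QEMUIOVector.  qemu_iovec_add() grows the array on
 * demand, so the alloc hint only avoids reallocation.
 */
static inline void example_build_qiov(QEMUIOVector *qiov,
                                      void *hdr, size_t hdr_len,
                                      void *payload, size_t payload_len)
{
    qemu_iovec_init(qiov, 2);
    qemu_iovec_add(qiov, hdr, hdr_len);
    qemu_iovec_add(qiov, payload, payload_len);
    /* qiov->size is now hdr_len + payload_len */
}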
/*
 * Concatenates (partial) iovecs from src_iov to the end of dst.
 * It skips `soffset' bytes at the beginning of src_iov, then adds
 * individual vectors from src_iov to dst until `sbytes' bytes total
 * have been covered, or until the end of src_iov, whichever comes
 * first.  This way, it is okay to specify a very large value for
 * `sbytes' to indicate "up to the end of src".
 * Only vector pointers are processed, not the actual data buffers.
 */
void qemu_iovec_concat_iov(QEMUIOVector *dst,
                           struct iovec *src_iov, unsigned int src_cnt,
                           size_t soffset, size_t sbytes)
{
    int i;
    size_t done;

    if (!sbytes) {
        return;
    }
    assert(dst->nalloc != -1);
    for (i = 0, done = 0; done < sbytes && i < src_cnt; i++) {
        if (soffset < src_iov[i].iov_len) {
            size_t len = MIN(src_iov[i].iov_len - soffset, sbytes - done);
            qemu_iovec_add(dst, src_iov[i].iov_base + soffset, len);
            done += len;
            soffset = 0;
        } else {
            soffset -= src_iov[i].iov_len;
        }
    }
    assert(soffset == 0); /* offset beyond end of src */
}
/*
 * Concatenates (partial) iovecs from src to the end of dst.
 * It skips `soffset' bytes at the beginning of src, then adds
 * individual vectors from src to dst until `sbytes' bytes total have
 * been covered, or until the end of src, whichever comes first.  This
 * way, it is okay to specify a very large value for `sbytes' to
 * indicate "up to the end of src".
 * Only vector pointers are processed, not the actual data buffers.
 */
void qemu_iovec_concat(QEMUIOVector *dst,
                       QEMUIOVector *src, size_t soffset, size_t sbytes)
{
    qemu_iovec_concat_iov(dst, src->iov, src->niov, soffset, sbytes);
}
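/*
 * Illustrative sketch, not part of the original file: append everything
 * after the first `skip' bytes of `src' to `dst'.  Passing -1 for
 * `sbytes' relies on the documented "up to the end of src" behaviour.
 */
static inline void example_concat_tail(QEMUIOVector *dst, QEMUIOVector *src,
                                       size_t skip)
{
    qemu_iovec_concat(dst, src, skip, -1);
}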
void qemu_iovec_destroy(QEMUIOVector *qiov)
{
    assert(qiov->nalloc != -1);

    qemu_iovec_reset(qiov);
    g_free(qiov->iov);
    qiov->nalloc = 0;
    qiov->iov = NULL;
}
void qemu_iovec_reset(QEMUIOVector *qiov)
{
    assert(qiov->nalloc != -1);

    qiov->niov = 0;
    qiov->size = 0;
}
size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
                         void *buf, size_t bytes)
{
    return iov_to_buf(qiov->iov, qiov->niov, offset, buf, bytes);
}
size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
                           const void *buf, size_t bytes)
{
    return iov_from_buf(qiov->iov, qiov->niov, offset, buf, bytes);
}
size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                         int fillc, size_t bytes)
{
    return iov_memset(qiov->iov, qiov->niov, offset, fillc, bytes);
}
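/*
 * Illustrative sketch, not part of the original file: after a short
 * read of `nread' bytes into `qiov', zero the remainder so the caller
 * never sees stale data.
 */
static inline void example_zero_after_short_read(QEMUIOVector *qiov,
                                                 size_t nread)
{
    qemu_iovec_memset(qiov, nread, 0, qiov->size - nread);
}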
size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
                         size_t bytes)
{
    size_t total = 0;
    struct iovec *cur;

    for (cur = *iov; *iov_cnt > 0; cur++) {
        if (cur->iov_len > bytes) {
            /* this element survives; trim it and stop */
            cur->iov_base += bytes;
            cur->iov_len -= bytes;
            total += bytes;
            break;
        }

        bytes -= cur->iov_len;
        total += cur->iov_len;
        (*iov_cnt)--;
    }

    *iov = cur;
    return total;
}
size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
                        size_t bytes)
{
    size_t total = 0;
    struct iovec *cur;

    if (*iov_cnt == 0) {
        return 0;
    }

    cur = iov + (*iov_cnt - 1);

    while (*iov_cnt > 0) {
        if (cur->iov_len > bytes) {
            /* this element survives; trim it and stop */
            cur->iov_len -= bytes;
            total += bytes;
            break;
        }

        bytes -= cur->iov_len;
        total += cur->iov_len;
        cur--;
        (*iov_cnt)--;
    }

    return total;
}
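/*
 * Illustrative sketch, not part of the original file: strip a
 * fixed-size header from the front of a request vector in place.
 * iov_discard_front() may modify both the iov pointer and the element
 * it leaves first, so keep a copy if the original layout is needed
 * again.
 */
static inline bool example_strip_header(struct iovec **iov,
                                        unsigned int *iov_cnt,
                                        size_t hdr_len)
{
    return iov_discard_front(iov, iov_cnt, hdr_len) == hdr_len;
}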