/*
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "fsdev/qemu-fsdev.h"
16
#include "qemu-thread.h"
17
#include "qemu-coroutine.h"
18
#include "virtio-9p-coth.h"
20
/*
 * Coroutine wrapper: obtain the inode generation for `path` by calling
 * the backend's get_st_gen() extended op in a worker thread.
 *
 * NOTE(review): this extraction is corrupted -- bare original-line-number
 * lines are interleaved and several body lines (error conversion, unlock,
 * return) are missing from this view.  Comments state only what is visible.
 */
int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
24
V9fsState *s = pdu->s;
26
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
29
/* Only backends providing the get_st_gen extended op are queried. */
if (s->ctx.exops.get_st_gen) {
30
/* Read lock: presumably keeps the path name stable -- TODO confirm. */
v9fs_path_read_lock(s);
31
/* Dispatch the blocking backend call to a worker thread. */
v9fs_co_run_in_worker(
33
err = s->ctx.exops.get_st_gen(&s->ctx, path, st_mode,
44
/*
 * Coroutine wrapper: lstat() `path` through the fsdev backend into *stbuf.
 * The blocking call runs in a worker thread while the path read lock is
 * held.
 * NOTE(review): corrupted extraction -- error-path, unlock and return
 * lines are missing from this view.
 */
int v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
47
V9fsState *s = pdu->s;
49
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
52
v9fs_path_read_lock(s);
53
/* Dispatch the blocking backend call to a worker thread. */
v9fs_co_run_in_worker(
55
err = s->ops->lstat(&s->ctx, path, stbuf);
64
/*
 * Coroutine wrapper: fstat() the open fid's file state into *stbuf via
 * the backend, in a worker thread.
 * NOTE(review): corrupted extraction -- no path lock is visible here
 * (consistent with operating on an already-open fd); error handling and
 * return lines are missing from this view.
 */
int v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp, struct stat *stbuf)
67
V9fsState *s = pdu->s;
69
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
72
/* Dispatch the blocking backend call to a worker thread. */
v9fs_co_run_in_worker(
74
err = s->ops->fstat(&s->ctx, &fidp->fs, stbuf);
82
/*
 * Coroutine wrapper: open the file behind `fidp` with `flags` via the
 * backend, in a worker thread, under the path read lock.  On some path
 * (not fully visible here) it triggers fd reclamation when the number of
 * open fds exceeds the hardware/host limit.
 * NOTE(review): corrupted extraction -- success bookkeeping, unlock and
 * return lines are missing from this view.
 */
int v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags)
85
V9fsState *s = pdu->s;
87
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
90
v9fs_path_read_lock(s);
91
/* Dispatch the blocking backend open to a worker thread. */
v9fs_co_run_in_worker(
93
err = s->ops->open(&s->ctx, &fidp->path, flags, &fidp->fs);
103
/* Too many host fds open: reclaim some before/after this open --
 * exact placement not visible in this corrupted view. */
if (total_open_fd > open_fd_hw) {
104
v9fs_reclaim_fd(pdu);
110
/*
 * Coroutine wrapper: atomically create-and-open `name` under the
 * directory fid `fidp` (backend open2 op), then lstat() the new file
 * into *stbuf and repoint fidp->path at it.  Runs in a worker thread
 * under the directory fid's read lock.
 * NOTE(review): corrupted extraction -- FsCred initialization is only
 * partially visible (mode/uid; gid presumably set from the `gid`
 * parameter -- confirm against full source), and the error/cleanup
 * ordering around the two close() calls cannot be established from
 * this view.
 */
int v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name, gid_t gid,
111
int flags, int mode, struct stat *stbuf)
116
V9fsState *s = pdu->s;
118
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
122
/* Creation credentials: permission bits only, owner from the fid. */
cred.fc_mode = mode & 07777;
123
cred.fc_uid = fidp->uid;
126
* Hold the directory fid lock so that directory path name
127
* don't change. Read lock is fine because this fid cannot
128
* be used by any other operation.
130
v9fs_path_read_lock(s);
131
/* Dispatch the blocking create+open to a worker thread. */
v9fs_co_run_in_worker(
133
err = s->ops->open2(&s->ctx, &fidp->path,
134
name->data, flags, &cred, &fidp->fs);
138
/* Build the new file's full path from the directory fid + name. */
v9fs_path_init(&path);
139
err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
141
err = s->ops->lstat(&s->ctx, &path, stbuf);
144
/* Error path (context missing): undo the open. */
s->ops->close(&s->ctx, &fidp->fs);
146
/* Success path (context missing): fid now refers to the new file. */
v9fs_path_copy(&fidp->path, &path);
149
/* Another error path (context missing): undo the open. */
s->ops->close(&s->ctx, &fidp->fs);
151
v9fs_path_free(&path);
157
/* Too many host fds open: reclaim some. */
if (total_open_fd > open_fd_hw) {
158
v9fs_reclaim_fd(pdu);
164
/*
 * Coroutine wrapper: close the open file state `fs` via the backend,
 * in a worker thread.
 * NOTE(review): corrupted extraction -- open-fd accounting and return
 * lines are missing from this view.
 */
int v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
167
V9fsState *s = pdu->s;
169
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
172
/* Dispatch the blocking backend close to a worker thread. */
v9fs_co_run_in_worker(
174
err = s->ops->close(&s->ctx, fs);
185
/*
 * Coroutine wrapper: fsync (or fdatasync when `datasync` is nonzero --
 * semantics delegated to the backend) the fid's open file, in a worker
 * thread.
 * NOTE(review): corrupted extraction -- error handling and return lines
 * are missing from this view.
 */
int v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync)
188
V9fsState *s = pdu->s;
190
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
193
/* Dispatch the blocking backend fsync to a worker thread. */
v9fs_co_run_in_worker(
195
err = s->ops->fsync(&s->ctx, &fidp->fs, datasync);
203
/*
 * Coroutine wrapper: create a hard link named `name` inside directory
 * fid `newdirfid` pointing at `oldfid`'s path, via the backend, in a
 * worker thread under the path read lock.
 * NOTE(review): corrupted extraction -- error handling, unlock and
 * return lines are missing from this view.
 */
int v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid,
204
V9fsFidState *newdirfid, V9fsString *name)
207
V9fsState *s = pdu->s;
209
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
212
v9fs_path_read_lock(s);
213
/* Dispatch the blocking backend link to a worker thread. */
v9fs_co_run_in_worker(
215
err = s->ops->link(&s->ctx, &oldfid->path,
216
&newdirfid->path, name->data);
225
/*
 * Coroutine wrapper: vectored positional write (pwritev) of `iovcnt`
 * iovecs at `offset` to the fid's open file, via the backend, in a
 * worker thread.
 * NOTE(review): corrupted extraction -- error conversion and return
 * lines are missing from this view; no path lock is visible here
 * (consistent with operating on an already-open fd).
 */
int v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
226
struct iovec *iov, int iovcnt, int64_t offset)
229
V9fsState *s = pdu->s;
231
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
234
/* Dispatch the blocking backend write to a worker thread. */
v9fs_co_run_in_worker(
236
err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset);
244
/*
 * Coroutine wrapper: vectored positional read (preadv) of `iovcnt`
 * iovecs at `offset` from the fid's open file, via the backend, in a
 * worker thread.
 * NOTE(review): corrupted extraction -- the function's tail (error
 * conversion, return) is cut off at the end of this view.
 */
int v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
245
struct iovec *iov, int iovcnt, int64_t offset)
248
V9fsState *s = pdu->s;
250
/* Bail out before doing any work if the client cancelled the request. */
if (v9fs_request_cancelled(pdu)) {
253
/* Dispatch the blocking backend read to a worker thread. */
v9fs_co_run_in_worker(
255
err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset);