2
* Copyright (c) 2008 Jakub Jermar
5
* Redistribution and use in source and binary forms, with or without
6
* modification, are permitted provided that the following conditions
9
* - Redistributions of source code must retain the above copyright
10
* notice, this list of conditions and the following disclaimer.
11
* - Redistributions in binary form must reproduce the above copyright
12
* notice, this list of conditions and the following disclaimer in the
13
* documentation and/or other materials provided with the distribution.
14
* - The name of the author may not be used to endorse or promote products
15
* derived from this software without specific prior written permission.
17
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35
* @brief Implementation of VFS operations for the FAT file system server.
39
#include "fat_dentry.h"
41
#include "../../vfs/vfs.h"
45
#include <ipc/services.h>
46
#include <ipc/devmap.h>
50
#include <byteorder.h>
51
#include <adt/hash_table.h>
54
#include <fibril_sync.h>
58
/** Extract the FAT-specific node data from a generic fs node (NULL-safe). */
#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
/** Map a FAT node back to its generic fs node wrapper (NULL-safe). */
#define FS_NODE(node) ((node) ? (node)->bp : NULL)
61
/** Mutex protecting the list of cached free FAT nodes. */
static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);
67
/** Initialize an in-core FAT node to a pristine, unused state.
 *
 * NOTE(review): reconstructed from a garbled extraction; the field resets in
 * the extraction gaps were restored conservatively — verify against upstream.
 */
static void fat_node_initialize(fat_node_t *node)
{
	fibril_mutex_initialize(&node->lock);
	node->bp = NULL;
	node->idx = NULL;
	node->type = 0;
	link_initialize(&node->ffn_link);
	node->size = 0;
	node->lnkcnt = 0;
	node->refcnt = 0;
	node->dirty = false;
}
80
/** Write the in-core node's dirty metadata back to its on-disk dentry.
 *
 * Looks up the parent-directory block holding this node's dentry and updates
 * the first-cluster, size and attribute fields, then marks the block dirty.
 *
 * NOTE(review): reconstructed from a garbled extraction; local declarations
 * and the block_put() epilogue were restored conservatively — verify upstream.
 *
 * @param node Node whose dentry is to be synchronized.
 */
static void fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	unsigned dps;

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g time fields) */

	b->dirty = true;	/* need to sync block */
	block_put(b);
}
113
/* Obtain a fat_node_t: appears to recycle a cached free node (syncing it if
 * dirty and detaching it from its old index) before falling back to a fresh
 * malloc of both the fs_node_t wrapper and the fat_node_t.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (declarations, braces, goto/error paths). Kept
 * byte-identical; recover the complete body from upstream before editing.
 */
static fat_node_t *fat_node_get_new(void)
118
fibril_mutex_lock(&ffn_mutex);
119
if (!list_empty(&ffn_head)) {
120
/* Try to use a cached free node structure. */
122
nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
123
if (!fibril_mutex_trylock(&nodep->lock))
125
idxp_tmp = nodep->idx;
126
if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
127
fibril_mutex_unlock(&nodep->lock);
130
list_remove(&nodep->ffn_link);
131
fibril_mutex_unlock(&ffn_mutex);
133
fat_node_sync(nodep);
134
idxp_tmp->nodep = NULL;
135
fibril_mutex_unlock(&nodep->lock);
136
fibril_mutex_unlock(&idxp_tmp->lock);
140
/* Try to allocate a new node structure. */
141
fibril_mutex_unlock(&ffn_mutex);
142
fn = (fs_node_t *)malloc(sizeof(fs_node_t));
145
nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
151
fat_node_initialize(nodep);
152
fs_node_initialize(fn);
159
/* Core of fat_node_get(): returns the in-core node for a locked index,
 * either bumping the refcount of an already-instantiated node or reading the
 * dentry from disk and populating a fresh node (directory sizes are computed
 * by walking the FAT, since FAT dentries carry no size for directories).
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines; kept byte-identical pending recovery from upstream.
 */
/** Internal version of fat_node_get().
161
* @param idxp Locked index structure.
163
static fat_node_t *fat_node_get_core(fat_idx_t *idxp)
168
fat_node_t *nodep = NULL;
176
* The node is already instantiated in memory.
178
fibril_mutex_lock(&idxp->nodep->lock);
179
if (!idxp->nodep->refcnt++)
180
list_remove(&idxp->nodep->ffn_link);
181
fibril_mutex_unlock(&idxp->nodep->lock);
186
* We must instantiate the node from the file system.
191
nodep = fat_node_get_new();
195
bs = block_bb_get(idxp->dev_handle);
196
bps = uint16_t_le2host(bs->bps);
198
dps = bps / sizeof(fat_dentry_t);
200
/* Read the block that contains the dentry of interest. */
201
b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
202
(idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
205
d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
206
if (d->attr & FAT_ATTR_SUBDIR) {
208
* The only directory which does not have this bit set is the
209
* root directory itself. The root directory node is handled
210
* and initialized elsewhere.
212
nodep->type = FAT_DIRECTORY;
214
* Unfortunately, the 'size' field of the FAT dentry is not
215
* defined for the directory entry type. We must determine the
216
* size of the directory by walking the FAT.
218
nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
219
uint16_t_le2host(d->firstc));
221
nodep->type = FAT_FILE;
222
nodep->size = uint32_t_le2host(d->size);
224
nodep->firstc = uint16_t_le2host(d->firstc);
230
/* Link the idx structure with the node structure. */
238
* Forward declarations of FAT libfs operations.
240
static fs_node_t *fat_node_get(dev_handle_t, fs_index_t);
241
static void fat_node_put(fs_node_t *);
242
static fs_node_t *fat_create_node(dev_handle_t, int);
243
static int fat_destroy_node(fs_node_t *);
244
static int fat_link(fs_node_t *, fs_node_t *, const char *);
245
static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
246
static fs_node_t *fat_match(fs_node_t *, const char *);
247
static fs_index_t fat_index_get(fs_node_t *);
248
static size_t fat_size_get(fs_node_t *);
249
static unsigned fat_lnkcnt_get(fs_node_t *);
250
static bool fat_has_children(fs_node_t *);
251
static fs_node_t *fat_root_get(dev_handle_t);
252
static char fat_plb_get_char(unsigned);
253
static bool fat_is_directory(fs_node_t *);
254
static bool fat_is_file(fs_node_t *node);
257
* FAT libfs operations.
260
/** Instantiate a FAT in-core node. */
261
fs_node_t *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
266
idxp = fat_idx_get_by_index(dev_handle, index);
269
/* idxp->lock held */
270
nodep = fat_node_get_core(idxp);
271
fibril_mutex_unlock(&idxp->lock);
272
return FS_NODE(nodep);
275
/* Drop a reference to an in-core node; on the last reference the node appears
 * to be appended to the free-node cache (or destroyed if it has no index).
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines; kept byte-identical pending recovery from upstream.
 */
void fat_node_put(fs_node_t *fn)
277
fat_node_t *nodep = FAT_NODE(fn);
278
bool destroy = false;
280
fibril_mutex_lock(&nodep->lock);
281
if (!--nodep->refcnt) {
283
fibril_mutex_lock(&ffn_mutex);
284
list_append(&nodep->ffn_link, &ffn_head);
285
fibril_mutex_unlock(&ffn_mutex);
288
* The node does not have any index structure associated
289
* with itself. This can only mean that we are releasing
290
* the node after a failed attempt to allocate the index
296
fibril_mutex_unlock(&nodep->lock);
303
/* Create a new file or directory node: for directories a cluster is allocated
 * and filled with never-used dentries; a fresh index structure is obtained and
 * resources are rolled back (clusters freed, node put) on failure.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (error checks, braces); kept byte-identical pending
 * recovery from upstream.
 */
fs_node_t *fat_create_node(dev_handle_t dev_handle, int flags)
308
fat_cluster_t mcl, lcl;
312
bs = block_bb_get(dev_handle);
313
bps = uint16_t_le2host(bs->bps);
314
if (flags & L_DIRECTORY) {
315
/* allocate a cluster */
316
rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
321
nodep = fat_node_get_new();
323
fat_free_clusters(bs, dev_handle, mcl);
326
idxp = fat_idx_get_new(dev_handle);
328
fat_free_clusters(bs, dev_handle, mcl);
329
fat_node_put(FS_NODE(nodep));
332
/* idxp->lock held */
333
if (flags & L_DIRECTORY) {
338
* Populate the new cluster with unused dentries.
340
for (i = 0; i < bs->spc; i++) {
341
b = _fat_block_get(bs, dev_handle, mcl, i,
343
/* mark all dentries as never-used */
344
memset(b->data, 0, bps);
348
nodep->type = FAT_DIRECTORY;
350
nodep->size = bps * bs->spc;
352
nodep->type = FAT_FILE;
353
nodep->firstc = FAT_CLST_RES0;
356
nodep->lnkcnt = 0; /* not linked anywhere */
363
fibril_mutex_unlock(&idxp->lock);
364
return FS_NODE(nodep);
367
int fat_destroy_node(fs_node_t *fn)
369
fat_node_t *nodep = FAT_NODE(fn);
373
* The node is not reachable from the file system. This means that the
374
* link count should be zero and that the index structure cannot be
375
* found in the position hash. Obviously, we don't need to lock the node
376
* nor its index structure.
378
assert(nodep->lnkcnt == 0);
381
* The node may not have any children.
383
assert(fat_has_children(fn) == false);
385
bs = block_bb_get(nodep->idx->dev_handle);
386
if (nodep->firstc != FAT_CLST_RES0) {
388
/* Free all clusters allocated to the node. */
389
fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
392
fat_idx_destroy(nodep->idx);
398
/* Link a child node under a parent directory: finds (or grows the parent to
 * create) a free dentry, writes the name, optionally creates "." and ".."
 * entries in a new directory, records the child's dentry position in its
 * index, and hashes the index into the position hash. FAT supports at most
 * one hard link per node.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * many missing source lines (error returns, loop bodies, braces); kept
 * byte-identical pending recovery from upstream.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
400
fat_node_t *parentp = FAT_NODE(pfn);
401
fat_node_t *childp = FAT_NODE(cfn);
409
fat_cluster_t mcl, lcl;
412
fibril_mutex_lock(&childp->lock);
413
if (childp->lnkcnt == 1) {
415
* On FAT, we don't support multiple hard links.
417
fibril_mutex_unlock(&childp->lock);
420
assert(childp->lnkcnt == 0);
421
fibril_mutex_unlock(&childp->lock);
423
if (!fat_dentry_name_verify(name)) {
425
* Attempt to create unsupported name.
431
* Get us an unused parent node's dentry or grow the parent and allocate
435
fibril_mutex_lock(&parentp->idx->lock);
436
bs = block_bb_get(parentp->idx->dev_handle);
437
bps = uint16_t_le2host(bs->bps);
438
dps = bps / sizeof(fat_dentry_t);
440
blocks = parentp->size / bps;
442
for (i = 0; i < blocks; i++) {
443
b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
444
for (j = 0; j < dps; j++) {
445
d = ((fat_dentry_t *)b->data) + j;
446
switch (fat_classify_dentry(d)) {
447
case FAT_DENTRY_SKIP:
448
case FAT_DENTRY_VALID:
449
/* skipping used and meta entries */
451
case FAT_DENTRY_FREE:
452
case FAT_DENTRY_LAST:
453
/* found an empty slot */
462
* We need to grow the parent in order to create a new unused dentry.
464
if (parentp->idx->pfc == FAT_CLST_ROOT) {
465
/* Can't grow the root directory. */
466
fibril_mutex_unlock(&parentp->idx->lock);
469
rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
471
fibril_mutex_unlock(&parentp->idx->lock);
474
fat_append_clusters(bs, parentp, mcl);
475
b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NOREAD);
476
d = (fat_dentry_t *)b->data;
478
* Clear all dentries in the block except for the first one (the first
479
* dentry will be cleared in the next step).
481
memset(d + 1, 0, bps - sizeof(fat_dentry_t));
485
* At this point we only establish the link between the parent and the
486
* child. The dentry, except of the name and the extension, will remain
487
* uninitialized until the corresponding node is synced. Thus the valid
488
* dentry data is kept in the child node structure.
490
memset(d, 0, sizeof(fat_dentry_t));
491
fat_dentry_name_set(d, name);
492
b->dirty = true; /* need to sync block */
494
fibril_mutex_unlock(&parentp->idx->lock);
496
fibril_mutex_lock(&childp->idx->lock);
499
* If possible, create the Sub-directory Identifier Entry and the
500
* Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
501
* are not mandatory according to Standard ECMA-107 and HelenOS VFS does
502
* not use them anyway, so this is rather a sign of our good will.
504
b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE);
505
d = (fat_dentry_t *)b->data;
506
if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
507
str_cmp(d->name, FAT_NAME_DOT) == 0) {
508
memset(d, 0, sizeof(fat_dentry_t));
509
str_cpy(d->name, 8, FAT_NAME_DOT);
510
str_cpy(d->ext, 3, FAT_EXT_PAD);
511
d->attr = FAT_ATTR_SUBDIR;
512
d->firstc = host2uint16_t_le(childp->firstc);
513
/* TODO: initialize also the date/time members. */
516
if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
517
str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
518
memset(d, 0, sizeof(fat_dentry_t));
519
str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
520
str_cpy(d->ext, 3, FAT_EXT_PAD);
521
d->attr = FAT_ATTR_SUBDIR;
522
d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
523
host2uint16_t_le(FAT_CLST_RES0) :
524
host2uint16_t_le(parentp->firstc);
525
/* TODO: initialize also the date/time members. */
527
b->dirty = true; /* need to sync block */
530
childp->idx->pfc = parentp->firstc;
531
childp->idx->pdi = i * dps + j;
532
fibril_mutex_unlock(&childp->idx->lock);
534
fibril_mutex_lock(&childp->lock);
536
childp->dirty = true; /* need to sync node */
537
fibril_mutex_unlock(&childp->lock);
540
* Hash in the index structure into the position hash.
542
fat_idx_hashin(childp->idx);
547
/* Unlink a child from its parent directory: refuses non-empty directories,
 * erases the child's dentry on disk, removes the index structure from the
 * position hash and clears the child's position information.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (declarations, return epilogue); kept byte-identical
 * pending recovery from upstream.
 */
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
549
fat_node_t *parentp = FAT_NODE(pfn);
550
fat_node_t *childp = FAT_NODE(cfn);
559
if (fat_has_children(cfn))
562
fibril_mutex_lock(&parentp->lock);
563
fibril_mutex_lock(&childp->lock);
564
assert(childp->lnkcnt == 1);
565
fibril_mutex_lock(&childp->idx->lock);
566
bs = block_bb_get(childp->idx->dev_handle);
567
bps = uint16_t_le2host(bs->bps);
569
b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc,
570
(childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
572
d = (fat_dentry_t *)b->data +
573
(childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
574
/* mark the dentry as not-currently-used */
575
d->name[0] = FAT_DENTRY_ERASED;
576
b->dirty = true; /* need to sync block */
579
/* remove the index structure from the position hash */
580
fat_idx_hashout(childp->idx);
581
/* clear position information */
582
childp->idx->pfc = FAT_CLST_RES0;
583
childp->idx->pdi = 0;
584
fibril_mutex_unlock(&childp->idx->lock);
586
childp->dirty = true;
587
fibril_mutex_unlock(&childp->lock);
588
fibril_mutex_unlock(&parentp->lock);
593
/* Look up a named component in a parent directory: scans the parent's
 * dentries block by block, compares canonicalized names, and instantiates the
 * matching node via its position-hash index (locking parent before child).
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (declarations, break/continue statements); kept
 * byte-identical pending recovery from upstream.
 */
fs_node_t *fat_match(fs_node_t *pfn, const char *component)
596
fat_node_t *parentp = FAT_NODE(pfn);
597
char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
599
unsigned bps; /* bytes per sector */
600
unsigned dps; /* dentries per sector */
605
fibril_mutex_lock(&parentp->idx->lock);
606
bs = block_bb_get(parentp->idx->dev_handle);
607
bps = uint16_t_le2host(bs->bps);
608
dps = bps / sizeof(fat_dentry_t);
609
blocks = parentp->size / bps;
610
for (i = 0; i < blocks; i++) {
611
b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
612
for (j = 0; j < dps; j++) {
613
d = ((fat_dentry_t *)b->data) + j;
614
switch (fat_classify_dentry(d)) {
615
case FAT_DENTRY_SKIP:
616
case FAT_DENTRY_FREE:
618
case FAT_DENTRY_LAST:
620
fibril_mutex_unlock(&parentp->idx->lock);
623
case FAT_DENTRY_VALID:
624
fat_dentry_name_get(d, name);
627
if (fat_dentry_namecmp(name, component) == 0) {
631
* Assume tree hierarchy for locking. We
632
* already have the parent and now we are going
633
* to lock the child. Never lock in the oposite
636
fat_idx_t *idx = fat_idx_get_by_pos(
637
parentp->idx->dev_handle, parentp->firstc,
639
fibril_mutex_unlock(&parentp->idx->lock);
642
* Can happen if memory is low or if we
643
* run out of 32-bit indices.
648
nodep = fat_node_get_core(idx);
649
fibril_mutex_unlock(&idx->lock);
651
return FS_NODE(nodep);
657
fibril_mutex_unlock(&parentp->idx->lock);
661
/** Return the VFS index of a node. */
fs_index_t fat_index_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->idx->index;
}
666
/** Return the size of a node in bytes. */
size_t fat_size_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->size;
}
671
/** Return the link count of a node (0 or 1 on FAT). */
unsigned fat_lnkcnt_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->lnkcnt;
}
676
/* Report whether a directory node contains any valid dentries; non-directory
 * nodes trivially have no children. Scans dentries block by block.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (declarations, return statements); kept byte-identical
 * pending recovery from upstream.
 */
bool fat_has_children(fs_node_t *fn)
679
fat_node_t *nodep = FAT_NODE(fn);
686
if (nodep->type != FAT_DIRECTORY)
689
fibril_mutex_lock(&nodep->idx->lock);
690
bs = block_bb_get(nodep->idx->dev_handle);
691
bps = uint16_t_le2host(bs->bps);
692
dps = bps / sizeof(fat_dentry_t);
694
blocks = nodep->size / bps;
696
for (i = 0; i < blocks; i++) {
699
b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
700
for (j = 0; j < dps; j++) {
701
d = ((fat_dentry_t *)b->data) + j;
702
switch (fat_classify_dentry(d)) {
703
case FAT_DENTRY_SKIP:
704
case FAT_DENTRY_FREE:
706
case FAT_DENTRY_LAST:
708
fibril_mutex_unlock(&nodep->idx->lock);
711
case FAT_DENTRY_VALID:
713
fibril_mutex_unlock(&nodep->idx->lock);
717
fibril_mutex_unlock(&nodep->idx->lock);
723
fibril_mutex_unlock(&nodep->idx->lock);
727
/** Return the root node of the file system on the given device (index 0). */
fs_node_t *fat_root_get(dev_handle_t dev_handle)
{
	return fat_node_get(dev_handle, 0);
}
732
char fat_plb_get_char(unsigned pos)
734
return fat_reg.plb_ro[pos % PLB_SIZE];
737
/** Return true if the node is a directory. */
bool fat_is_directory(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_DIRECTORY;
}
742
/** Return true if the node is a regular file. */
bool fat_is_file(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_FILE;
}
747
/** libfs operations */
748
libfs_ops_t fat_libfs_ops = {
750
.node_get = fat_node_get,
751
.node_put = fat_node_put,
752
.create = fat_create_node,
753
.destroy = fat_destroy_node,
755
.unlink = fat_unlink,
756
.index_get = fat_index_get,
757
.size_get = fat_size_get,
758
.lnkcnt_get = fat_lnkcnt_get,
759
.has_children = fat_has_children,
760
.root_get = fat_root_get,
761
.plb_get_char = fat_plb_get_char,
762
.is_directory = fat_is_directory,
763
.is_file = fat_is_file
770
/* Handle VFS_MOUNTED: receive mount options, initialize libblock, read and
 * validate the boot sector (bps must equal BS_SIZE), set up the block cache
 * and the per-device index, then build and answer with the root node.
 * Failure paths unwind with block_fini()/fat_idx_fini_by_dev_handle().
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (error checks, returns, braces); kept byte-identical
 * pending recovery from upstream.
 */
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
772
dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
773
enum cache_mode cmode;
779
/* accept the mount options */
782
if (!ipc_data_write_receive(&callid, &size)) {
783
ipc_answer_0(callid, EINVAL);
784
ipc_answer_0(rid, EINVAL);
787
char *opts = malloc(size + 1);
789
ipc_answer_0(callid, ENOMEM);
790
ipc_answer_0(rid, ENOMEM);
793
ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
795
ipc_answer_0(rid, retval);
801
/* Check for option enabling write through. */
802
if (str_cmp(opts, "wtcache") == 0)
803
cmode = CACHE_MODE_WT;
805
cmode = CACHE_MODE_WB;
807
/* initialize libblock */
808
rc = block_init(dev_handle, BS_SIZE);
810
ipc_answer_0(rid, rc);
814
/* prepare the boot block */
815
rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
817
block_fini(dev_handle);
818
ipc_answer_0(rid, rc);
822
/* get the buffer with the boot sector */
823
bs = block_bb_get(dev_handle);
825
/* Read the number of root directory entries. */
826
bps = uint16_t_le2host(bs->bps);
827
rde = uint16_t_le2host(bs->root_ent_max);
829
if (bps != BS_SIZE) {
830
block_fini(dev_handle);
831
ipc_answer_0(rid, ENOTSUP);
835
/* Initialize the block cache */
836
rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
838
block_fini(dev_handle);
839
ipc_answer_0(rid, rc);
843
rc = fat_idx_init_by_dev_handle(dev_handle);
845
block_fini(dev_handle);
846
ipc_answer_0(rid, rc);
850
/* Initialize the root node. */
851
fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
853
block_fini(dev_handle);
854
fat_idx_fini_by_dev_handle(dev_handle);
855
ipc_answer_0(rid, ENOMEM);
858
fs_node_initialize(rfn);
859
fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
862
block_fini(dev_handle);
863
fat_idx_fini_by_dev_handle(dev_handle);
864
ipc_answer_0(rid, ENOMEM);
867
fat_node_initialize(rootp);
869
fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
873
block_fini(dev_handle);
874
fat_idx_fini_by_dev_handle(dev_handle);
875
ipc_answer_0(rid, ENOMEM);
878
assert(ridxp->index == 0);
879
/* ridxp->lock held */
881
rootp->type = FAT_DIRECTORY;
882
rootp->firstc = FAT_CLST_ROOT;
884
rootp->lnkcnt = 0; /* FS root is not linked */
885
rootp->size = rde * sizeof(fat_dentry_t);
887
ridxp->nodep = rootp;
891
fibril_mutex_unlock(&ridxp->lock);
893
ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
896
/** Handle a VFS_MOUNT request by delegating to libfs. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
901
/** Handle a VFS_LOOKUP request by delegating to libfs. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
906
/* Handle VFS_READ: for regular files return at most one block worth of data
 * (short reads are allowed); for directories treat the position as a dentry
 * index and return the next valid dentry name, skipping free/meta entries.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (declarations, loop increments, returns); kept
 * byte-identical pending recovery from upstream.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
908
dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
909
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
910
off_t pos = (off_t)IPC_GET_ARG3(*request);
911
fs_node_t *fn = fat_node_get(dev_handle, index);
919
ipc_answer_0(rid, ENOENT);
922
nodep = FAT_NODE(fn);
926
if (!ipc_data_read_receive(&callid, &len)) {
928
ipc_answer_0(callid, EINVAL);
929
ipc_answer_0(rid, EINVAL);
933
bs = block_bb_get(dev_handle);
934
bps = uint16_t_le2host(bs->bps);
936
if (nodep->type == FAT_FILE) {
938
* Our strategy for regular file reads is to read one block at
939
* most and make use of the possibility to return less data than
940
* requested. This keeps the code very simple.
942
if (pos >= nodep->size) {
943
/* reading beyond the EOF */
945
(void) ipc_data_read_finalize(callid, NULL, 0);
947
bytes = min(len, bps - pos % bps);
948
bytes = min(bytes, nodep->size - pos);
949
b = fat_block_get(bs, nodep, pos / bps,
951
(void) ipc_data_read_finalize(callid, b->data + pos % bps,
958
char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
961
assert(nodep->type == FAT_DIRECTORY);
962
assert(nodep->size % bps == 0);
963
assert(bps % sizeof(fat_dentry_t) == 0);
966
* Our strategy for readdir() is to use the position pointer as
967
* an index into the array of all dentries. On entry, it points
968
* to the first unread dentry. If we skip any dentries, we bump
969
* the position pointer accordingly.
971
bnum = (pos * sizeof(fat_dentry_t)) / bps;
972
while (bnum < nodep->size / bps) {
975
b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
976
for (o = pos % (bps / sizeof(fat_dentry_t));
977
o < bps / sizeof(fat_dentry_t);
979
d = ((fat_dentry_t *)b->data) + o;
980
switch (fat_classify_dentry(d)) {
981
case FAT_DENTRY_SKIP:
982
case FAT_DENTRY_FREE:
984
case FAT_DENTRY_LAST:
988
case FAT_DENTRY_VALID:
989
fat_dentry_name_get(d, name);
999
ipc_answer_0(callid, ENOENT);
1000
ipc_answer_1(rid, ENOENT, 0);
1003
(void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
1004
bytes = (pos - spos) + 1;
1008
ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
1011
/* Handle VFS_WRITE: write at most one block per request; either overwrite
 * within the current cluster boundary (filling any gap with zeros) or
 * allocate and append a fresh chain of clusters for writes past the boundary,
 * then answer with the number of bytes written and the new node size.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (declarations, returns, braces); kept byte-identical
 * pending recovery from upstream.
 */
void fat_write(ipc_callid_t rid, ipc_call_t *request)
1013
dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1014
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1015
off_t pos = (off_t)IPC_GET_ARG3(*request);
1016
fs_node_t *fn = fat_node_get(dev_handle, index);
1023
unsigned bpc; /* bytes per cluster */
1025
int flags = BLOCK_FLAGS_NONE;
1028
ipc_answer_0(rid, ENOENT);
1031
nodep = FAT_NODE(fn);
1033
ipc_callid_t callid;
1035
if (!ipc_data_write_receive(&callid, &len)) {
1037
ipc_answer_0(callid, EINVAL);
1038
ipc_answer_0(rid, EINVAL);
1042
bs = block_bb_get(dev_handle);
1043
bps = uint16_t_le2host(bs->bps);
1048
* In all scenarios, we will attempt to write out only one block worth
1049
* of data at maximum. There might be some more efficient approaches,
1050
* but this one greatly simplifies fat_write(). Note that we can afford
1051
* to do this because the client must be ready to handle the return
1052
* value signalizing a smaller number of bytes written.
1054
bytes = min(len, bps - pos % bps);
1056
flags |= BLOCK_FLAGS_NOREAD;
1058
boundary = ROUND_UP(nodep->size, bpc);
1059
if (pos < boundary) {
1061
* This is the easier case - we are either overwriting already
1062
* existing contents or writing behind the EOF, but still within
1063
* the limits of the last cluster. The node size may grow to the
1064
* next block size boundary.
1066
fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1067
b = fat_block_get(bs, nodep, pos / bps, flags);
1068
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
1070
b->dirty = true; /* need to sync block */
1072
if (pos + bytes > nodep->size) {
1073
nodep->size = pos + bytes;
1074
nodep->dirty = true; /* need to sync node */
1076
ipc_answer_2(rid, EOK, bytes, nodep->size);
1081
* This is the more difficult case. We must allocate new
1082
* clusters for the node and zero them out.
1086
fat_cluster_t mcl, lcl;
1088
nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
1089
/* create an independent chain of nclsts clusters in all FATs */
1090
status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1091
if (status != EOK) {
1092
/* could not allocate a chain of nclsts clusters */
1094
ipc_answer_0(callid, status);
1095
ipc_answer_0(rid, status);
1098
/* zero fill any gaps */
1099
fat_fill_gap(bs, nodep, mcl, pos);
1100
b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
1102
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
1104
b->dirty = true; /* need to sync block */
1107
* Append the cluster chain starting in mcl to the end of the
1108
* node's cluster chain.
1110
fat_append_clusters(bs, nodep, mcl);
1111
nodep->size = pos + bytes;
1112
nodep->dirty = true; /* need to sync node */
1113
ipc_answer_2(rid, EOK, bytes, nodep->size);
1119
/* Handle VFS_TRUNCATE: growing is not supported; shrinking either keeps the
 * current cluster count or chops the cluster chain after the last cluster
 * still needed for the new size.
 * NOTE(review): garbled extraction — interleaved line-number artifacts and
 * missing source lines (declarations, rc assignments, node_put); kept
 * byte-identical pending recovery from upstream.
 */
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1121
dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1122
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1123
size_t size = (off_t)IPC_GET_ARG3(*request);
1124
fs_node_t *fn = fat_node_get(dev_handle, index);
1129
unsigned bpc; /* bytes per cluster */
1133
ipc_answer_0(rid, ENOENT);
1136
nodep = FAT_NODE(fn);
1138
bs = block_bb_get(dev_handle);
1139
bps = uint16_t_le2host(bs->bps);
1143
if (nodep->size == size) {
1145
} else if (nodep->size < size) {
1147
* The standard says we have the freedom to grow the node.
1148
* For now, we simply return an error.
1151
} else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1153
* The node will be shrunk, but no clusters will be deallocated.
1156
nodep->dirty = true; /* need to sync node */
1160
* The node will be shrunk, clusters will be deallocated.
1163
fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1165
fat_cluster_t lastc;
1166
(void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
1167
&lastc, (size - 1) / bpc);
1168
fat_chop_clusters(bs, nodep, lastc);
1171
nodep->dirty = true; /* need to sync node */
1175
ipc_answer_0(rid, rc);
1179
/** Handle a VFS_CLOSE request; nothing to do for FAT, answer EOK. */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, EOK);
}
1184
/** Handle a VFS_DESTROY request: destroy the node named by (device, index).
 *
 * NOTE(review): reconstructed from a garbled extraction; the rc declaration
 * and the NULL check on fn were restored conservatively — verify upstream.
 */
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	int rc;

	fs_node_t *fn = fat_node_get(dev_handle, index);
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	rc = fat_destroy_node(fn);
	ipc_answer_0(rid, rc);
}
1200
/** Handle a VFS_OPEN_NODE request by delegating to libfs. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1205
/** Handle a VFS_STAT request by delegating to libfs. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1210
/** Handle a VFS_SYNC request. */
void fat_sync(ipc_callid_t rid, ipc_call_t *request)
{
	/* Dummy implementation */
	ipc_answer_0(rid, EOK);
}