/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

static int ext4_split_extent(handle_t *handle,
			     struct ext4_ext_path *path,
			     struct ext4_map_blocks *map,

static int ext4_ext_truncate_extend_restart(handle_t *handle,
	if (!ext4_handle_valid(handle))
	if (handle->h_buffer_credits > needed)
	err = ext4_journal_extend(handle, needed);
	err = ext4_truncate_restart_trans(handle, inode, needed);

static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */

#define ext4_ext_dirty(handle, inode, path) \
	__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
	int depth = path->p_depth;
	struct ext4_extent *ex;

	/*
	 * Try to predict block placement assuming that we are
	 * filling in a file which will eventually be
	 * non-sparse --- i.e., in the case of libbfd writing
	 * an ELF object sections out-of-order but in a way
	 * that eventually results in a contiguous object or
	 * executable file, or some database extending a table
	 * space file.  However, this is actually somewhat
	 * non-ideal if we are writing a sparse file such as
	 * qemu or KVM writing a raw image file that is going
	 * to stay fairly sparse, since it will end up
	 * fragmenting the file system's free space.  Maybe we
	 * should have some heuristics or some way to allow
	 * userspace to pass a hint to the file system,
	 * especially if the latter case turns out to be
	 */
	ex = path[depth].p_ext;
		ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
		ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

		if (block > ext_block)
			return ext_pblk + (block - ext_block);
		return ext_pblk - (ext_block - block);

	/* it looks like index is empty;
	 * try to find starting block from index itself */
	if (path[depth].p_bh)
		return path[depth].p_bh->b_blocknr;

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
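/*
 * Example (hypothetical numbers, not from the original file): if the
 * extent preceding @block maps logical block 100 to physical block
 * 5000, a lookup for block 103 yields the goal 5000 + (103 - 100) =
 * 5003, so new allocations land next to their logical neighbours.
 */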
/*
 * Allocation for a meta data block
 */
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,

static inline int ext4_ext_space_block(struct inode *inode, int check)
	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)

static inline int ext4_ext_space_root(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
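/*
 * For illustration (not part of the original file): the extent header,
 * extent entry and index entry are 12 bytes each, so a 4 KiB tree
 * block holds (4096 - 12) / 12 = 340 entries, while the 60-byte
 * i_data root holds (60 - 12) / 12 = 4 entries.
 */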
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
	struct ext4_inode_info *ei = EXT4_I(inode);

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			ei->i_da_metadata_calc_len = 0;
		ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
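/*
 * Worked example (hypothetical, 4 KiB blocks): idxs = (4096 - 12) / 12
 * = 340, so a contiguous delayed-allocation run is charged one extra
 * metadata block at every 340th block, another at every 340**2 =
 * 115600th block, and so on; a non-contiguous block restarts the run
 * and is charged the full worst case of ext_depth(inode) + 1 blocks.
 */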
ext4_ext_max_entries(struct inode *inode, int depth)
	if (depth == ext_depth(inode)) {
		max = ext4_ext_space_root(inode, 1);
		max = ext4_ext_space_root_idx(inode, 1);
		max = ext4_ext_space_block(inode, 1);
		max = ext4_ext_space_block_idx(inode, 1);

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
	unsigned short entries;
	if (eh->eh_entries == 0)

	entries = le16_to_cpu(eh->eh_entries);
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
			if (!ext4_valid_extent(inode, ext))
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
			if (!ext4_valid_extent_idx(inode, ext_idx))

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
	const char *error_msg;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";

	ext4_error_inode(inode, function, line, 0,
			 "bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 error_msg, le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);

#define ext4_ext_check(inode, eh, depth) \
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));

static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
	int k, l = path->p_depth;

	for (k = 0; k <= l; k++, path++) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
				  le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx),

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),

#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)

void ext4_ext_drop_refs(struct ext4_ext_path *path)
	int depth = path->p_depth;

	for (i = 0; i <= depth; i++, path++)

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
		if (block < le32_to_cpu(m->ei_block))
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
			  m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
		struct ext4_extent_idx *chix, *ix;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
		BUG_ON(chix != path->p_idx);
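/*
 * Sketch of the search invariant (illustrative, assuming a checked
 * header): the loop keeps l just past the last index whose ei_block is
 * <= block, so when l crosses r, l - 1 is the closest index entry.
 * E.g. with index entries starting at blocks 0, 100 and 200, a search
 * for block 150 settles on the entry at 100.
 */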
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */

	ext_debug("binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);
		if (block < le32_to_cpu(m->ee_block))
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
			  m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

	ext_debug("  -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_uninitialized(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
		struct ext4_extent *chex, *ex;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
		BUG_ON(chex != path->p_ext);

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
		     struct ext4_ext_path *path)
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

		/* account possible depth increase */
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
			return ERR_PTR(-ENOMEM);

	/* walk through the tree */
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
			if (bh_submit_read(bh) < 0) {
			/* validate the extent entries */
			need_to_validate = 1;
		eh = ext_block_hdr(bh);
		if (unlikely(ppos > depth)) {
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;

	if (need_to_validate && ext4_ext_check(inode, eh, i))

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	ext4_ext_drop_refs(path);
	return ERR_PTR(-EIO);
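/*
 * Illustration (hypothetical depth-2 tree): after a successful lookup,
 * path[0] describes the root header in i_data, path[1] the
 * intermediate index block, and path[2] the leaf, with path[2].p_ext
 * pointing at the extent chosen by the binary search (or NULL for an
 * empty leaf).
 */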
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
	struct ext4_extent_idx *ix;

	err = ext4_ext_get_access(handle, inode, curp);

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
		ext_debug("insert new index %d before: %llu\n", logical, ptr);

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
		ext_debug("insert new index %d: "
			  "move %d indices from 0x%p to 0x%p\n",
			  logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

/*
 * inserts new subtree into the path, using free index entry
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
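/*
 * Rough sketch (illustrative, not from the original): splitting a full
 * leaf [A B C D | E F G] at extent E allocates a new leaf, moves E..G
 * into it, shrinks the old leaf to A..D, and records the new leaf's
 * starting block in the parent index, allocating one new block per
 * level between @at and the leaf.
 */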
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	ext4_fsblk_t newblock, oldblock;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
		border = newext->ee_block;
		ext_debug("leaf will be added."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));

	/*
	 * If an error occurs, then we break processing
	 * and mark the filesystem read-only.  The index won't
	 * be inserted and the tree will be in a consistent
	 * state.  Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		ablocks[a] = newblock;

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
	bh = sb_getblk(inode->i_sb, newblock);

	err = ext4_journal_get_create_access(handle, bh);

	neh = ext_block_hdr(bh);
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);

	set_buffer_uptodate(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);

	/* correct old leaf */
		err = ext4_ext_get_access(handle, inode, path + depth);
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);

	/* create intermediate indexes */
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
	ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);

		err = ext4_journal_get_create_access(handle, bh);

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
			  i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
			     EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		set_buffer_uptodate(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);

		/* correct old index */
			err = ext4_ext_get_access(handle, inode, path + i);
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

	if (buffer_locked(bh))

	/* free all allocated blocks in error case */
	for (i = 0; i < depth; i++) {
		ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
				 EXT4_FREE_BLOCKS_METADATA);

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 struct ext4_extent *newext)
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
					   newext, &err, flags);

	bh = sb_getblk(inode->i_sb, newblock);
		ext4_std_error(inode->i_sb, err);

	err = ext4_journal_get_create_access(handle, bh);

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
	ext4_mark_inode_dirty(handle, inode);
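/*
 * Illustration (hypothetical): a depth-0 tree whose root leaf in
 * i_data is full grows to depth 1: the root's four extents are copied
 * into a newly allocated leaf block, and the root is rewritten as a
 * single index entry pointing at that block.
 */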
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);

		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
			err = PTR_ERR(path);
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);

		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
			err = PTR_ERR(path);

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
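/*
 * Example (hypothetical): with an extent mapping logical blocks
 * 100..107 to physical 5000..5007 and *logical = 110, the search
 * reports *logical = 107 and *phys = 5007, the last block of the
 * closest allocated extent to the left.
 */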
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))

	/* we've gone up to the root and found no index to the right */

	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);

	bh = sb_bread(inode->i_sb, block);
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
	ex = EXT_FIRST_EXTENT(eh);

	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 */
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			if (path[depth].p_ext &&
				path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_idx[1].ei_block);

	return EXT_MAX_BLOCKS;
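/*
 * Example (hypothetical): if the path sits on the last extent of its
 * leaf but an ancestor index has a sibling entry whose ei_block is
 * 500, the walk returns 500; when every level is already at its last
 * entry, it returns EXT_MAX_BLOCKS.
 */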
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-depth tree has no leaf blocks at all */
		return EXT_MAX_BLOCKS;

	/* go to index block */

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);

	return EXT_MAX_BLOCKS;

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);

		/* there is no tree at all */

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);

		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
		err = ext4_ext_get_access(handle, inode, path + k);
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);

ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
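/*
 * Example (hypothetical): ex1 mapping logical 0..9 to physical
 * 1000..1009 and ex2 mapping logical 10..14 to physical 1010..1014
 * merge into one 15-block extent; the same pair with ex2 starting at
 * physical 2000 does not, since the extents must be contiguous both
 * logically and physically.
 */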
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				       struct ext4_ext_path *path,
				       struct ext4_extent *ex)
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		le16_add_cpu(&eh->eh_entries, -1);

	WARN_ON(eh->eh_entries == 0);
	if (!eh->eh_entries)
		EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");

/*
 * This function tries to merge the @ex extent to neighbours in the tree.
 * return 1 if merge left else 0.
 */
static int ext4_ext_try_to_merge(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex) {
	struct ext4_extent_header *eh;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

		ret = ext4_ext_try_to_merge_right(inode, path, ex);

/*
 * check if a portion of the "newext" extent overlaps with an
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
	b2 &= ~(sbi->s_cluster_ratio - 1);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
		b2 &= ~(sbi->s_cluster_ratio - 1);

	/* check for wrap through zero on extent logical start block */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
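/*
 * Example (hypothetical): newext covering logical 100..119 against an
 * existing extent that starts at block 110 is trimmed to b2 - b1 = 10
 * blocks (100..109) and the function returns 1; if nothing is
 * allocated before block 120, it returns 0 and newext is untouched.
 */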
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path,
			   struct ext4_extent *newext, int flag)
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	unsigned uninitialized = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);

		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_pblock(newext),
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			ext_debug("insert %u:%llu:[%d]%d before: "
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d after: "
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
			ext_debug("insert %u:%llu:[%d]%d: "
				  "move %d extents from 0x%p to 0x%p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);

	err = ext4_ext_dirty(handle, inode, path + depth);

		ext4_ext_drop_refs(npath);
	ext4_ext_invalidate_cache(inode);

static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			       ext4_lblk_t num, ext_prepare_callback func,
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCKS) {
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

			/* there is no extent yet, so try to allocate
			 * all requested space */
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
		} else if (block >= le32_to_cpu(ex->ee_block)
			   + ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 */
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)

		BUG_ON(end <= start);

			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
		err = func(inode, next, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err == EXT_REPEAT)
		else if (err == EXT_BREAK) {

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */

		block = cbex.ec_block + cbex.ec_len;

		ext4_ext_drop_refs(path);

ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
		      __u32 len, ext4_fsblk_t start)
	struct ext4_ext_cache *cex;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	trace_ext4_ext_put_in_cache(inode, block, len, start);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_block = block;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
		/* there is no extent yet, so gap is [0;-] */
		len = EXT_MAX_BLOCKS;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
		   + ext4_ext_get_actual_len(ex)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex),
		BUG_ON(next == lblock);
		len = next - lblock;

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0);
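/*
 * Example (hypothetical): with extents at logical 0..9 and 50..59, a
 * lookup of block 20 caches the gap starting at 10 with length 40, so
 * subsequent lookups inside the hole are answered from the cache
 * without another tree walk.
 */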
/*
 * ext4_ext_check_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * cache extent pointer.  If the cached extent is a hole,
 * this routine should be used instead of
 * ext4_ext_in_cache if the calling function needs to
 * know the size of the hole.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
				struct ext4_ext_cache *ex) {
	struct ext4_ext_cache *cex;
	struct ext4_sb_info *sbi;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	sbi = EXT4_SB(inode->i_sb);

	/* has cache valid data? */
	if (cex->ec_len == 0)

	if (in_range(block, cex->ec_block, cex->ec_len)) {
		memcpy(ex, cex, sizeof(struct ext4_ext_cache));
		ext_debug("%u cached by %u:%u:%llu\n",
			  cex->ec_block, cex->ec_len, cex->ec_start);
		sbi->extent_cache_misses++;
		sbi->extent_cache_hits++;
	trace_ext4_ext_in_cache(inode, block, ret);
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

/*
 * ext4_ext_in_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
		  struct ext4_extent *ex)
	struct ext4_ext_cache cex;

	if (ext4_ext_check_cache(inode, block, &cex)) {
		ex->ee_block = cpu_to_le32(cex.ec_block);
		ext4_ext_store_pblock(ex, cex.ec_start);
		ex->ee_len = cpu_to_le16(cex.ec_len);

/*
 * removes index from the index block.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path)
	/* free index block */
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
	err = ext4_ext_get_access(handle, inode, path);

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);

	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);

	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns max. credits that are needed to insert an extent
 * to the extent tree.
 * When passing the actual path, the caller should calculate credits
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
					    struct ext4_ext_path *path)
	int depth = ext_depth(inode);

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 * There is some space in the leaf, no
			 * need to account for leaf block credit
			 *
			 * bitmaps and block group descriptor blocks
			 * and other metadata blocks still need to be
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ext4_chunk_trans_blocks(inode, nrblocks);
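/*
 * Example (hypothetical): if the leaf already has a free slot, the
 * insert costs 2 + EXT4_META_TRANS_BLOCKS(sb) credits (bitmap plus
 * group descriptor plus the usual metadata overhead); otherwise the
 * estimate falls back to ext4_chunk_trans_blocks(), which also covers
 * possible tree splits.
 */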
/*
 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
 *
 * if nrblocks fit in a single extent (chunk flag is 1), then
 * in the worst case, each tree level index/leaf needs to be changed
 * if the tree splits due to inserting a new extent, then the old tree
 * index/leaf needs to be updated too
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
	int depth = ext_depth(inode);

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      ext4_fsblk_t *partial_cluster,
			      ext4_lblk_t from, ext4_lblk_t to)
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we make a note
	 * that we tried freeing the cluster, and check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of the ext4_truncate() operation.
	 */
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;

	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
	/*
	 * If we have a partial cluster, and it's different from the
	 * cluster of the last block, we need to explicitly free the
	 * partial cluster here.
	 */
	pblk = ext4_ext_pblock(ex) + ee_len - 1;
	if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, *partial_cluster),
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;

#ifdef EXTENTS_STATS
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);

	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		pblk = ext4_ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, pblk);
		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
		/*
		 * If the block range to be freed didn't start at the
		 * beginning of a cluster, and we removed the entire
		 * extent, save the partial cluster here, since we
		 * might need to delete if we determine that the
		 * truncate operation has removed all of the blocks in
		 */
		if (pblk & (sbi->s_cluster_ratio - 1) &&
			*partial_cluster = EXT4_B2C(sbi, pblk);
			*partial_cluster = 0;
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		start = ext4_ext_pblock(ex);

		ext_debug("free first %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);
		printk(KERN_INFO "strange request: removal(2) "
		       "%u-%u from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), ee_len);
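/*
 * Example (hypothetical bigalloc setup, s_cluster_ratio = 16): if the
 * freed tail begins at physical block 1030, cluster 64 (blocks
 * 1024..1039) is only partially covered, so EXT4_B2C(sbi, 1030) = 64
 * is remembered in *partial_cluster and the whole cluster is freed
 * only once a later call proves no remaining extent still uses it.
 */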
* ext4_ext_rm_leaf() Removes the extents associated with the
2286
* blocks appearing between "start" and "end", and splits the extents
2287
* if "start" and "end" appear in the same extent
2289
* @handle: The journal handle
2290
* @inode: The files inode
2291
* @path: The path to the leaf
2292
* @start: The first block to remove
2293
* @end: The last block to remove
2296
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2297
struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2298
ext4_lblk_t start, ext4_lblk_t end)
2300
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2301
int err = 0, correct_index = 0;
2302
int depth = ext_depth(inode), credits;
2303
struct ext4_extent_header *eh;
2306
ext4_lblk_t ex_ee_block;
2307
unsigned short ex_ee_len;
2308
unsigned uninitialized = 0;
2309
struct ext4_extent *ex;
2311
	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug("  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end <= ex_ee_block) {
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			EXT4_ERROR_INODE(inode, "bad truncate %u:%u\n",
					 start, end);
			err = -EIO;
			goto out;
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			num = a - ex_ee_block;
		} else {
			/* remove whole extent: excellent! */
			num = 0;
		}
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
					 a, b);
		if (err)
			goto out;

		if (num == 0)
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);

		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		} else
			*partial_cluster = 0;

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/*
	 * If there is still an entry in the leaf node, check to see if
	 * it references the partial cluster.  This is the only place
	 * where it could; if it doesn't, we can free the cluster.
	 */
	if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
	     *partial_cluster)) {
		int flags = EXT4_FREE_BLOCKS_FORGET;

		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
			flags |= EXT4_FREE_BLOCKS_METADATA;

		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, *partial_cluster),
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;
	}

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
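/*
 * Worked illustration (not part of the original file; numbers invented):
 * how the [a, b] window above clamps a removal request to one extent.
 * For an extent covering logical blocks 100..199 (ex_ee_block = 100,
 * ex_ee_len = 100) and a truncate from start = 150 with
 * end = EXT_MAX_BLOCKS - 1:
 *
 *	a = max(ex_ee_block, start)               = 150
 *	b = min(ex_ee_block + ex_ee_len - 1, end) = 199
 *
 * b reaches the extent's last block and a != ex_ee_block, so only the
 * tail is removed: num = a - ex_ee_block = 50, and the extent is shrunk
 * in place to cover blocks 100..149.
 */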
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	ext4_fsblk_t partial_cluster = 0;
	handle_t *handle;
	int i, err;

	ext_debug("truncate since %u\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	ext4_ext_invalidate_cache(inode);

	trace_ext4_ext_remove_space(inode, start, depth);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_depth = depth;
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	i = err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial_cluster, start,
					       EXT_MAX_BLOCKS - 1);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * truncatei_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
			path->p_hdr->eh_entries);

	/* If we still have something in the partial cluster and we have removed
	 * even the first extent, then we should free the blocks in the partial
	 * cluster as well. */
	if (partial_cluster && path->p_hdr->eh_entries == 0) {
		int flags = EXT4_FREE_BLOCKS_FORGET;

		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
			flags |= EXT4_FREE_BLOCKS_METADATA;

		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
				 EXT4_SB(sb)->s_cluster_ratio, flags);
		partial_cluster = 0;
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}
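/*
 * Sketch (not part of the original file): the shape of the depth-first
 * walk above, stripped of journaling and error handling.  The helpers
 * leaf()/more_to_rm()/descend() are hypothetical names used purely for
 * illustration.
 */
#if 0
	i = 0;				/* start at the root level */
	while (i >= 0) {
		if (i == depth) {	/* leaf: free the data blocks */
			leaf(path + i);
			i--;		/* back up to the parent index */
		} else if (more_to_rm(path + i)) {
			descend(path + i);	/* sb_bread() the next level */
			i++;
		} else {
			i--;		/* index exhausted, go up */
		}
	}
#endif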
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}
/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}
/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
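/*
 * Illustration (not in the original source): a write into the middle of
 * an uninitialized extent splits it with both resulting halves still
 * marked uninitialized, and permits the zeroout fallback on ENOSPC:
 */
#if 0
	split_flag = EXT4_EXT_MAY_ZEROOUT |
		     EXT4_EXT_MARK_UNINIT1 |
		     EXT4_EXT_MARK_UNINIT2;
#endif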
/*
 * ext4_split_extent_at() splits an extent at given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flags: indicates if the extent could be zeroed out if the split
 *		 fails, and the states (init or uninit) of the new extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
 * of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> split is not needed, and just mark the extent.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	ext_debug("ext4_split_extents_at: inode %lu, logical"
		"block %llu\n", inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with
		 * then we just change the state of the extent, and splitting
		 * is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			ext4_ext_mark_uninitialized(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}

	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNINIT1)
		ext4_ext_mark_uninitialized(ex);

	/*
	 * path may lead to new leaf, not to original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;

	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNINIT2)
		ext4_ext_mark_uninitialized(ex2);

	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	} else if (err)
		goto fix_extent_len;

out:
	ext4_ext_show_leaf(inode, path);
	return err;

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
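/*
 * Worked example (not from the original file; numbers invented):
 * splitting the extent [lblk 100, len 50, pblk 5000] at logical block
 * 120 shrinks ex in place to [100, len 20, pblk 5000] and inserts newex
 * as [120, len 30, pblk 5020], since
 * newblock = split - ee_block + ext4_ext_pblock(ex) = 120 - 100 + 5000.
 */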
/*
 * ext4_split_extent() splits an extent and marks the extent which is covered
 * by @map as split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three)
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits in two extents: Split is happening at either end of the extent
 *   c> Splits in three extents: Someone is splitting in the middle of the
 *      extent
 *
 */
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path *path,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int uninitialized;
	int split_flag1, flags1;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	uninitialized = ext4_ext_is_uninitialized(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
				       EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	}

	ext4_ext_drop_refs(path);
	path = ext4_ext_find_extent(inode, map->m_lblk, path);
	if (IS_ERR(path))
		return PTR_ERR(path);

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : map->m_len;
}
#define EXT4_EXT_ZERO_LEN 7
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is uninitialized.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int ee_len, depth;
	int allocated;
	int err = 0;
	int split_flag = 0;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (map->m_lblk - ee_block);

	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);

	/* Pre-conditions */
	BUG_ON(!ext4_ext_is_uninitialized(ex));
	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));

	/*
	 * Attempt to transfer newly initialized blocks from the currently
	 * uninitialized extent to its left neighbor. This is much cheaper
	 * than an insertion followed by a merge as those involve costly
	 * memmove() calls. This is the common case in steady state for
	 * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
	 * writes.
	 *
	 * Limitations of the current logic:
	 *  - L1: we only deal with writes at the start of the extent.
	 *    The approach could be extended to writes at the end
	 *    of the extent but this scenario was deemed less common.
	 *  - L2: we do not deal with writes covering the whole extent.
	 *    This would require removing the extent if the transfer
	 *    is possible.
	 *  - L3: we only attempt to merge with an extent stored in the
	 *    same extent tree node.
	 */
	if ((map->m_lblk == ee_block) &&	/*L1*/
		(map->m_len < ee_len) &&	/*L2*/
		(ex > EXT_FIRST_EXTENT(eh))) {	/*L3*/
		struct ext4_extent *prev_ex;
		ext4_lblk_t prev_lblk;
		ext4_fsblk_t prev_pblk, ee_pblk;
		unsigned int prev_len, write_len;

		prev_ex = ex - 1;
		prev_lblk = le32_to_cpu(prev_ex->ee_block);
		prev_len = ext4_ext_get_actual_len(prev_ex);
		prev_pblk = ext4_ext_pblock(prev_ex);
		ee_pblk = ext4_ext_pblock(ex);
		write_len = map->m_len;

		/*
		 * A transfer of blocks from 'ex' to 'prev_ex' is allowed
		 * upon those conditions:
		 * - C1: prev_ex is initialized,
		 * - C2: prev_ex is logically abutting ex,
		 * - C3: prev_ex is physically abutting ex,
		 * - C4: prev_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_uninitialized(prev_ex)) &&		/*C1*/
			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
			(prev_len < (EXT_INIT_MAX_LEN - write_len))) {	/*C4*/
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, prev_ex);

			/* Shift the start of ex by 'write_len' blocks */
			ex->ee_block = cpu_to_le32(ee_block + write_len);
			ext4_ext_store_pblock(ex, ee_pblk + write_len);
			ex->ee_len = cpu_to_le16(ee_len - write_len);
			ext4_ext_mark_uninitialized(ex); /* Restore the flag */

			/* Extend prev_ex by 'write_len' blocks */
			prev_ex->ee_len = cpu_to_le16(prev_len + write_len);

			/* Mark the block containing both extents as dirty */
			ext4_ext_dirty(handle, inode, path + depth);

			/* Update path to point to the right extent */
			path[depth].p_ext = prev_ex;

			/* Result: number of initialized blocks past m_lblk */
			allocated = write_len;
			goto out;
		}
	}

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	/* If extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zeroout directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
	    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, ex);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;
		ext4_ext_mark_initialized(ex);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}

	/*
	 * four cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the first half.
	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (allocated > map->m_len) {
		if (allocated <= EXT4_EXT_ZERO_LEN &&
		    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 3 */
			zero_ex.ee_block =
					 cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if ((map->m_lblk - ee_block + map->m_len <
			   EXT4_EXT_ZERO_LEN) &&
			   (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 2 */
			if (map->m_lblk != ee_block) {
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
							ee_block);
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
				if (err)
					goto out;
			}

			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}

	allocated = ext4_split_extent(handle, inode, path,
				      &split_map, split_flag, 0);
	if (allocated < 0)
		err = allocated;

out:
	return err ? err : allocated;
}
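/*
 * Illustration of the EXT4_EXT_ZERO_LEN heuristic above (numbers made
 * up, not from the original source): with ee_len = 12 <= 2*7 the whole
 * extent is zeroed out and marked initialized in place, avoiding any
 * split; with ee_len = 64 and allocated = 5 <= 7, only the tail past
 * the write is zeroed (case 3), so a single two-way split suffices
 * instead of a three-way split.
 */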
/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO is used to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the uninitialized
 * extent into multiple initialized/uninitialized extents (up to three)
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC occurring at IO
 * completion, we need to split the uninitialized extent before the IO is
 * submitted. The uninitialized extent will be split into (at most) three
 * uninitialized extents. After IO completes, the part being filled will be
 * converted to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of uninitialized extent to be written on success.
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path *path,
					int flags)
{
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
	split_flag |= EXT4_EXT_MARK_UNINIT2;

	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)le32_to_cpu(ex->ee_block),
		ext4_ext_get_actual_len(ex));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
		unmap_underlying_metadata(bdev, block + i);
}
/*
 * Handle EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
			      ext4_lblk_t lblk,
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
	struct ext4_extent *last_ex;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
		return 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	if (unlikely(!eh->eh_entries)) {
		EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
				 "EOFBLOCKS_FL set");
		return -EIO;
	}
	last_ex = EXT_LAST_EXTENT(eh);
	/*
	 * We should clear the EOFBLOCKS_FL flag if we are writing the
	 * last block in the last extent in the file.  We test this by
	 * first checking to see if the caller to
	 * ext4_ext_get_blocks() was interested in the last block (or
	 * a block beyond the last block) in the current extent.  If
	 * this turns out to be false, we can bail out from this
	 * function immediately.
	 */
	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
	    ext4_ext_get_actual_len(last_ex))
		return 0;
	/*
	 * If the caller does appear to be planning to write at or
	 * beyond the end of the current extent, we then test to see
	 * if the current extent is the last extent in the file, by
	 * checking to make sure it was reached via the rightmost node
	 * at each level of the tree.
	 */
	for (i = depth-1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	return ext4_mark_inode_dirty(handle, inode);
}
/**
 * ext4_find_delalloc_range: find delayed allocated block in the given range.
 *
 * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
 * whether there are any buffers marked for delayed allocation. It returns '1'
 * on the first delalloc'ed buffer head found. If no buffer head in the given
 * range is marked for delalloc, it returns 0.
 * lblk_start should always be <= lblk_end.
 * search_hint_reverse is to indicate that searching in reverse from lblk_end to
 * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
 * block sooner). This is useful when blocks are truncated sequentially from
 * lblk_start towards lblk_end.
 */
static int ext4_find_delalloc_range(struct inode *inode,
				    ext4_lblk_t lblk_start,
				    ext4_lblk_t lblk_end,
				    int search_hint_reverse)
{
	struct address_space *mapping = inode->i_mapping;
	struct buffer_head *head, *bh = NULL;
	struct page *page;
	ext4_lblk_t i, pg_lblk;
	pgoff_t index;

	/* reverse search won't work if fs block size is less than page size */
	if (inode->i_blkbits < PAGE_CACHE_SHIFT)
		search_hint_reverse = 0;

	if (search_hint_reverse)
		i = lblk_end;
	else
		i = lblk_start;

	index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

	while ((i >= lblk_start) && (i <= lblk_end)) {
		page = find_get_page(mapping, index);
		if (!page)
			goto nextpage;

		if (!page_has_buffers(page))
			goto nextpage;

		head = page_buffers(page);
		if (!head)
			goto nextpage;

		bh = head;
		pg_lblk = index << (PAGE_CACHE_SHIFT -
						inode->i_blkbits);
		do {
			if (unlikely(pg_lblk < lblk_start)) {
				/*
				 * This is possible when fs block size is less
				 * than page size and our cluster starts/ends in
				 * middle of the page. So we need to skip the
				 * initial few blocks till we reach the 'lblk'
				 */
				pg_lblk++;
				continue;
			}

			/* Check if the buffer is delayed allocated and that it
			 * is not yet mapped. (when da-buffers are mapped during
			 * their writeout, their da_mapped bit is set.)
			 */
			if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
				page_cache_release(page);
				trace_ext4_find_delalloc_range(inode,
						lblk_start, lblk_end,
						search_hint_reverse,
						1, i);
				return 1;
			}
			if (search_hint_reverse)
				i--;
			else
				i++;
		} while ((i >= lblk_start) && (i <= lblk_end) &&
				((bh = bh->b_this_page) != head));
nextpage:
		if (page)
			page_cache_release(page);
		/*
		 * Move to next page. 'i' will be the first lblk in the next
		 * page.
		 */
		if (search_hint_reverse)
			index--;
		else
			index++;
		i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	}

	trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
					search_hint_reverse, 0, 0);
	return 0;
}
int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
			       int search_hint_reverse)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;
	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
					search_hint_reverse);
}
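/*
 * Cluster rounding sketch (not part of the original file; values made
 * up): with s_cluster_ratio = 16 (always a power of two), lblk = 35
 * rounds down to the start of its cluster and the whole cluster is
 * searched for delalloc buffers:
 */
#if 0
	lblk_start = lblk & ~(sbi->s_cluster_ratio - 1);	/* 35 -> 32 */
	lblk_end   = lblk_start + sbi->s_cluster_ratio - 1;	/* 47 */
#endif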
/**
 * Determines how many complete clusters (out of those specified by the 'map')
 * are under delalloc and were reserved quota for.
 * This function is called when we are writing out the blocks that were
 * originally written with their allocation delayed, but then the space was
 * allocated using fallocate() before the delayed allocation could be resolved.
 * The cases to look for are:
 * ('=' indicates delayed allocated blocks
 *  '-' indicates non-delayed allocated blocks)
 * (a) partial clusters towards beginning and/or end outside of allocated range
 *     are not delalloc'ed.
 *	Ex:
 *	|----c---=|====c====|====c====|===-c----|
 *	         |++++++ allocated ++++++|
 *	==> 4 complete clusters in above example
 *
 * (b) partial cluster (outside of allocated range) towards either end is
 *     marked for delayed allocation. In this case, we will exclude that
 *     cluster.
 *	Ex:
 *	|----====c========|========c========|
 *	     |++++++ allocated ++++++|
 *	==> 1 complete cluster in above example
 *
 *	Ex:
 *	|================c================|
 *            |++++++ allocated ++++++|
 *	==> 0 complete clusters in above example
 *
 * The ext4_da_update_reserve_space will be called only if we
 * determine here that there were some "entire" clusters that span
 * this 'allocated' range.
 * In the non-bigalloc case, this function will just end up returning num_blks
 * without ever calling ext4_find_delalloc_range.
 */
static unsigned int
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
			   unsigned int num_blks)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
	ext4_lblk_t lblk_from, lblk_to, c_offset;
	unsigned int allocated_clusters = 0;

	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);

	/* max possible clusters for this allocation */
	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;

	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);

	/* Check towards left side */
	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
	if (c_offset) {
		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
		lblk_to = lblk_from + c_offset - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
			allocated_clusters--;
	}

	/* Now check towards right. */
	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
	if (allocated_clusters && c_offset) {
		lblk_from = lblk_start + num_blks;
		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
			allocated_clusters--;
	}

	return allocated_clusters;
}
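/*
 * Worked example for the function above (numbers invented for
 * illustration): s_cluster_ratio = 4, lblk_start = 6, num_blks = 8
 * covers blocks 6..13, i.e. clusters 1..3, so allocated_clusters
 * starts at 3.  If the left partial (blocks 4..5) is delalloc,
 * cluster 1 is excluded; if the right partial (blocks 14..15) is
 * delalloc too, cluster 3 is also excluded, leaving a single fully
 * reserved cluster.
 */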
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
		  "block %llu, max_blocks %u, flags %d, allocated %u",
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
						    newblock);

	/* get_block() before submit the IO, split the extent */
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
		ret = ext4_split_unwritten_extents(handle, inode, map,
						   path, flags);
		/*
		 * Flag the inode (non-aio case) or end_io struct (aio case)
		 * that this IO needs conversion to written when IO is
		 * completed
		 */
		if (io)
			ext4_set_io_unwritten_flag(inode, io);
		else
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
		goto out;
	}
	/* IO end_io complete, convert the filled extent to written */
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
		ret = ext4_convert_unwritten_extents_endio(handle, inode,
							path);
		if (ret >= 0) {
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map->m_lblk,
						 path, map->m_len);
		} else
			err = ret;
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		goto map_out;

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out1;
	}

	/* buffered write, writepage time, convert*/
	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;

	map->m_flags |= EXT4_MAP_NEW;
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > map->m_len) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
	}

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservations.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;
		reserved_clusters = get_reserved_cluster_alloc(inode,
				map->m_lblk, map->m_len);
		if (reserved_clusters)
			ext4_da_update_reserve_space(inode,
						     reserved_clusters,
						     0);
	}

map_out:
	map->m_flags |= EXT4_MAP_MAPPED;
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
					 map->m_len);
		if (err < 0)
			goto out2;
	}
out1:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
/*
 * get_implied_cluster_alloc - check to see if the requested
 * allocation (in the map structure) overlaps with a cluster already
 * allocated in an extent.
 *	@sb	The filesystem superblock structure
 *	@map	The requested lblk->pblk mapping
 *	@ex	The extent structure which might contain an implied
 *			cluster allocation
 *
 * This function is called by ext4_ext_map_blocks() after we failed to
 * find blocks that were already in the inode's extent tree.  Hence,
 * we know that the beginning of the requested region cannot overlap
 * the extent from the inode's extent tree.  There are three cases we
 * want to catch.  The first is this case:
 *
 *		 |--- cluster # N--|
 *    |--- extent ---|	|---- requested region ---|
 *			|==========|
 *
 * The second case that we need to test for is this one:
 *
 *   |--------- cluster # N ----------------|
 *	   |--- requested region --|   |------- extent ----|
 *	   |=======================|
 *
 * The third case is when the requested region lies between two extents
 * within the same cluster:
 *          |------------- cluster # N-------------|
 * |----- ex -----|                  |---- ex_right ----|
 *                  |------ requested region ------|
 *                  |================|
 *
 * In each of the above cases, we need to set the map->m_pblk and
 * map->m_len so it corresponds to the return the extent labelled as
 * "|====|" from cluster #N, since it is already in use for data in
 * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
 * as a new "allocated" block region.  Otherwise, we will return 0 and
 * ext4_ext_map_blocks() will then allocate one or more new clusters
 * by calling ext4_mb_new_blocks().
 */
static int get_implied_cluster_alloc(struct super_block *sb,
				     struct ext4_map_blocks *map,
				     struct ext4_extent *ex,
				     struct ext4_ext_path *path)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
	ext4_lblk_t ex_cluster_start, ex_cluster_end;
	ext4_lblk_t rr_cluster_start, rr_cluster_end;
	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);

	/* The extent passed in that we are trying to match */
	ex_cluster_start = EXT4_B2C(sbi, ee_block);
	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);

	/* The requested region passed into ext4_map_blocks() */
	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
	rr_cluster_end = EXT4_B2C(sbi, map->m_lblk + map->m_len - 1);

	if ((rr_cluster_start == ex_cluster_end) ||
	    (rr_cluster_start == ex_cluster_start)) {
		if (rr_cluster_start == ex_cluster_end)
			ee_start += ee_len - 1;
		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
			c_offset;
		map->m_len = min(map->m_len,
				 (unsigned) sbi->s_cluster_ratio - c_offset);
		/*
		 * Check for and handle this case:
		 *
		 *   |--------- cluster # N-------------|
		 *		       |------- extent ----|
		 *	   |--- requested region ---|
		 *	   |===========|
		 */

		if (map->m_lblk < ee_block)
			map->m_len = min(map->m_len, ee_block - map->m_lblk);

		/*
		 * Check for the case where there is already another allocated
		 * block to the right of 'ex' but before the end of the cluster.
		 *
		 *          |------------- cluster # N-------------|
		 * |----- ex -----|                  |---- ex_right ----|
		 *                  |------ requested region ------|
		 *                  |================|
		 */
		if (map->m_lblk > ee_block) {
			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
			map->m_len = min(map->m_len, next - map->m_lblk);
		}

		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
		return 1;
	}

	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
	return 0;
}
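/*
 * Worked example (made-up numbers, not from the original source):
 * s_cluster_ratio = 16, an extent covering logical blocks 34..39 ends
 * in cluster #2 (blocks 32..47), and a request at m_lblk = 44 has
 * c_offset = 44 & 15 = 12 and starts in the same cluster.  The
 * function then maps m_pblk to offset 12 within the extent's physical
 * cluster and caps m_len at 16 - 12 = 4, so the implied mapping never
 * leaves cluster #2.
 */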
/*
 * Block allocation/map/preallocation routine for extents based files
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex, *ex2;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_fsblk_t newblock = 0;
	int free_on_err = 0, err = 0, depth, ret;
	unsigned int allocated = 0, offset = 0;
	unsigned int allocated_clusters = 0;
	unsigned int punched_out = 0;
	unsigned int result = 0;
	struct ext4_allocation_request ar;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
	ext4_lblk_t cluster_offset;

	ext_debug("blocks %u/%u requested for inode %lu\n",
		  map->m_lblk, map->m_len, inode->i_ino);
	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);

	/* check in cache */
	if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
		ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
		if (!newex.ee_start_lo && !newex.ee_start_hi) {
			if ((sbi->s_cluster_ratio > 1) &&
			    ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
				map->m_flags |= EXT4_MAP_FROM_CLUSTER;

			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
				goto out2;
			}
			/* we should allocate requested block */
		} else {
			/* block is already allocated */
			if (sbi->s_cluster_ratio > 1)
				map->m_flags |= EXT4_MAP_FROM_CLUSTER;
			newblock = map->m_lblk
				   - le32_to_cpu(newex.ee_block)
				   + ext4_ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = ext4_ext_get_actual_len(&newex) -
				(map->m_lblk - le32_to_cpu(newex.ee_block));
			goto out;
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
		err = -EIO;
		goto out2;
	}

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * Uninitialized extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);

		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);

		/* if found extent covers block, simply return it */
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			struct ext4_map_blocks punch_map;
			ext4_fsblk_t partial_cluster = 0;

			newblock = map->m_lblk - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
				  ee_block, ee_len, newblock);

			if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
				/*
				 * Do not put uninitialized extent
				 * in the cache
				 */
				if (!ext4_ext_is_uninitialized(ex)) {
					ext4_ext_put_in_cache(inode, ee_block,
						ee_len, ee_start);
					goto out;
				}
				ret = ext4_ext_handle_uninitialized_extents(
					handle, inode, map, path, flags,
					allocated, newblock);
				return ret;
			}

			/*
			 * Punch out the map length, but only to the
			 * end of the extent
			 */
			punched_out = allocated < map->m_len ?
				allocated : map->m_len;

			/*
			 * Since extents need to be converted to
			 * uninitialized, they must fit in an
			 * uninitialized extent
			 */
			if (punched_out > EXT_UNINIT_MAX_LEN)
				punched_out = EXT_UNINIT_MAX_LEN;

			punch_map.m_lblk = map->m_lblk;
			punch_map.m_pblk = newblock;
			punch_map.m_len = punched_out;
			punch_map.m_flags = 0;

			/* Check to see if the extent needs to be split */
			if (punch_map.m_len != ee_len ||
			    punch_map.m_lblk != ee_block) {

				ret = ext4_split_extent(handle, inode,
						path, &punch_map, 0,
						EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
						EXT4_GET_BLOCKS_PRE_IO);

				if (ret < 0) {
					err = ret;
					goto out2;
				}
				/*
				 * find extent for the block at
				 * the start of the hole
				 */
				ext4_ext_drop_refs(path);
				kfree(path);

				path = ext4_ext_find_extent(inode,
						map->m_lblk, NULL);
				if (IS_ERR(path)) {
					err = PTR_ERR(path);
					path = NULL;
					goto out2;
				}

				depth = ext_depth(inode);
				ex = path[depth].p_ext;
				ee_len = ext4_ext_get_actual_len(ex);
				ee_block = le32_to_cpu(ex->ee_block);
				ee_start = ext4_ext_pblock(ex);

			}

			ext4_ext_mark_uninitialized(ex);

			ext4_ext_invalidate_cache(inode);

			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial_cluster, map->m_lblk,
					       map->m_lblk + punched_out);

			if (!err && path->p_hdr->eh_entries == 0) {
				/*
				 * Punch hole freed all of this sub tree,
				 * so we need to correct eh_depth
				 */
				err = ext4_ext_get_access(handle, inode, path);
				if (err == 0) {
					ext_inode_hdr(inode)->eh_depth = 0;
					ext_inode_hdr(inode)->eh_max =
					cpu_to_le16(ext4_ext_space_root(
						inode, 0));

					err = ext4_ext_dirty(
						handle, inode, path);
				}
			}

			goto out2;
		}
	}

	if ((sbi->s_cluster_ratio > 1) &&
	    ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
		map->m_flags |= EXT4_MAP_FROM_CLUSTER;

	/*
	 * requested block isn't allocated yet;
	 * we couldn't try to create block if create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
		goto out2;
	}

	/*
	 * Okay, we need to do block allocation.
	 */
	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
	newex.ee_block = cpu_to_le32(map->m_lblk);
	cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);

	/*
	 * If we are doing bigalloc, check to see if the extent returned
	 * by ext4_ext_find_extent() implies a cluster we can use.
	 */
	if (cluster_offset && ex &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
		goto got_allocated_blocks;
	}

	/* find neighbour allocated blocks */
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = map->m_lblk;
	ex2 = NULL;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
	if (err)
		goto out2;

	/* Check if the extent after searching to the right implies a
	 * cluster we can use. */
	if ((sbi->s_cluster_ratio > 1) && ex2 &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
		goto got_allocated_blocks;
	}

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_UNINIT_MAX_LEN;

	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_len = cpu_to_le16(map->m_len);
	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	/*
	 * We calculate the offset from the beginning of the cluster
	 * for the logical block number, since when we allocate a
	 * physical cluster, the physical block should start at the
	 * same offset from the beginning of the cluster.  This is
	 * needed so that future calls to get_implied_cluster_alloc()
	 * work correctly.
	 */
	offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
	ar.goal -= offset;
	ar.logical -= offset;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);
	free_on_err = 1;
	allocated_clusters = ar.len;
	ar.len = EXT4_C2B(sbi, ar.len) - offset;
	if (ar.len > allocated)
		ar.len = allocated;

got_allocated_blocks:
	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock + offset);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * io_end structure was created for every IO write to an
		 * uninitialized extent. To avoid unnecessary conversion,
		 * here we flag the IO that really needs the conversion.
		 * For the non-async direct IO case, flag the inode state
		 * that we need to perform conversion when IO is done.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
			if (io)
				ext4_set_io_unwritten_flag(inode, io);
			else
				ext4_set_inode_state(inode,
						     EXT4_STATE_DIO_UNWRITTEN);
		}
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
	}

	err = 0;
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
		err = check_eofblocks_fl(handle, inode, map->m_lblk,
					 path, ar.len);
	if (!err)
		err = ext4_ext_insert_extent(handle, inode, path,
					     &newex, flags);
	if (err && free_on_err) {
		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
				 ext4_ext_get_actual_len(&newex), fb_flags);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext4_ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_flags |= EXT4_MAP_NEW;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;
		/*
		 * Check how many clusters we had reserved for this
		 * allocated range
		 */
		reserved_clusters = get_reserved_cluster_alloc(inode,
						map->m_lblk, allocated);
		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
			if (reserved_clusters) {
				/*
				 * We have clusters reserved for this range.
				 * But since we are not doing actual allocation
				 * and are simply using blocks from previously
				 * allocated cluster, we should release the
				 * reservation and not claim quota.
				 */
				ext4_da_update_reserve_space(inode,
						reserved_clusters, 0);
			}
		} else {
			BUG_ON(allocated_clusters < reserved_clusters);
			/* We will claim quota for all newly allocated blocks.*/
			ext4_da_update_reserve_space(inode, allocated_clusters,
							1);
			if (reserved_clusters < allocated_clusters) {
				struct ext4_inode_info *ei = EXT4_I(inode);
				int reservation = allocated_clusters -
						  reserved_clusters;
				/*
				 * It seems we claimed a few clusters outside of
				 * the range of this allocation. We should give
				 * them back to the reservation pool. This can
				 * happen in the following case:
				 *
				 * * Suppose s_cluster_ratio is 4 (i.e., each
				 *   cluster has 4 blocks. Thus, the clusters
				 *   are [0-3],[4-7],[8-11]...
				 * * First comes delayed allocation write for
				 *   logical blocks 10 & 11. Since there were no
				 *   previous delayed allocated blocks in the
				 *   range [8-11], we would reserve 1 cluster
				 *   for this write.
				 * * Next comes write for logical blocks 3 to 8.
				 *   In this case, we will reserve 2 clusters
				 *   (for [0-3] and [4-7]; and not for [8-11] as
				 *   that range has delayed allocated blocks.
				 *   Thus total reserved clusters now becomes 3.
				 * * Now, during the delayed allocation writeout
				 *   time, we will first write blocks [3-8] and
				 *   allocate 3 clusters for writing these
				 *   blocks. Also, we would claim all these
				 *   three clusters above.
				 * * Now when we come here to writeout the
				 *   blocks [10-11], we would expect to claim
				 *   the reservation of 1 cluster we had made
				 *   (and we would claim it since there are no
				 *   more delayed allocated blocks in the range
				 *   [8-11].) But our reserved cluster count had
				 *   already gone to 0.
				 *
				 *   Thus, at the step 4 above when we determine
				 *   that there are still some unwritten delayed
				 *   allocated blocks outside of our current
				 *   block range, we should increment the
				 *   reserved clusters count so that when the
				 *   remaining blocks finally get written, we
				 *   could claim them.
				 */
				dquot_reserve_block(inode,
						EXT4_C2B(sbi, reservation));
				spin_lock(&ei->i_block_reservation_lock);
				ei->i_reserved_data_blocks += reservation;
				spin_unlock(&ei->i_block_reservation_lock);
			}
		}
	}

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
			punched_out : allocated;

	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
		newblock, map->m_len, err ? err : result);

	return err ? err : result;
}
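/*
 * Usage sketch (not part of the original file): a typical lookup-only
 * call to the routine above; the caller and its locking context are
 * hypothetical, per the locking rule documented in the header comment.
 */
#if 0
	struct ext4_map_blocks map = {
		.m_lblk = lblk,		/* first logical block wanted */
		.m_len	= 1,
	};
	int ret;

	down_read(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_map_blocks(NULL, inode, &map, 0);	/* create == 0 */
	up_read(&EXT4_I(inode)->i_data_sem);
	/* ret > 0: map.m_pblk/m_len valid; ret == 0: hole; ret < 0: error */
#endif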
void ext4_ext_truncate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	loff_t page_len;
	int err = 0;

	/*
	 * finish any pending end_io work so we won't run the risk of
	 * converting any truncated blocks to initialized later
	 */
	ext4_flush_completed_IO(inode);

	/*
	 * probably first extent we're gonna free will be last in block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size % PAGE_CACHE_SIZE != 0) {
		page_len = PAGE_CACHE_SIZE -
			(inode->i_size & (PAGE_CACHE_SIZE - 1));

		err = ext4_discard_partial_page_buffers(handle,
			mapping, inode->i_size, page_len, 0);

		if (err)
			goto out_stop;
	}

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}
static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if (new_size > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}
}
/*
 * preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	handle_t *handle;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	int flags;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return ext4_punch_hole(file, offset, len);

	trace_ext4_fallocate_enter(inode, offset, len, mode);
	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because
	 * If blocksize = 4096 offset = 3072 and len = 2048
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- map.m_lblk;
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, (len + offset));
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
		return ret;
	}
	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
	/*
	 * Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple
	 * extents.
	 */
	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
retry:
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk = map.m_lblk + ret;
		map.m_len = max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map, flags);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = ((loff_t) map.m_lblk + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
					 (map.m_flags & EXT4_MAP_NEW));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	trace_ext4_fallocate_exit(inode, offset, max_blocks,
				ret > 0 ? ret2 : ret);
	return ret > 0 ? ret2 : ret;
}
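/*
 * Worked example for the max_blocks computation above (values from the
 * in-code comment): blocksize = 4096 (blkbits = 12), offset = 3072,
 * len = 2048.  The byte range [3072, 5120) touches blocks 0 and 1;
 * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2 and map.m_lblk = 3072 >> 12 = 0,
 * giving max_blocks = 2, whereas len >> blkbits alone would give 0.
 */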
/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io call back
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				    ssize_t len)
{
	handle_t *handle;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because
	 * If blocksize = 4096 offset = 3072 and len = 2048
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, map.m_len);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2 )
			break;
	}
	return ret > 0 ? ret2 : ret;
}

/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	__u64	logical;
	__u64	physical;
	__u64	length;
	__u32	flags = 0;
	int	ret = 0;
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits;

	blksize_bits = inode->i_sb->s_blocksize_bits;
	logical = (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_start == 0) {
		/*
		 * No extent in the extent tree contains block
		 * @newex->ec_block, so the block may lie in 1) a hole
		 * or 2) a delayed-extent.
		 *
		 * Holes or delayed-extents are processed as follows.
		 * 1. lookup dirty pages with the specified range in the
		 *    pagecache.  If no page is found, there is no
		 *    delayed-extent; return with EXT_CONTINUE.
		 * 2. find the 1st mapped buffer,
		 * 3. check if the mapped buffer is both in the request range
		 *    and a delayed buffer.  If not, there is no delayed-extent,
		 *    then return.
		 * 4. a delayed-extent is found; the extent will be collected.
		 */
		ext4_lblk_t	end = 0;
		pgoff_t		last_offset;
		pgoff_t		offset;
		pgoff_t		index;
		pgoff_t		start_index = 0;
		struct page	**pages = NULL;
		struct buffer_head *bh = NULL;
		struct buffer_head *head = NULL;
		unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);

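		/*
		 * Note: one page's worth of page pointers per lookup
		 * batch; e.g. with 4KiB pages and 8-byte pointers (an
		 * illustrative assumption) that is 4096 / 8 = 512
		 * pages per find_get_pages_tag() call.
		 */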
		pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (pages == NULL)
			return -ENOMEM;

		offset = logical >> PAGE_SHIFT;
repeat:
		last_offset = offset;
		head = NULL;
		ret = find_get_pages_tag(inode->i_mapping, &offset,
					PAGECACHE_TAG_DIRTY, nr_pages, pages);
		if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
			/* First time, try to find a mapped buffer. */
			if (ret == 0) {
out:
				for (index = 0; index < ret; index++)
					page_cache_release(pages[index]);
				/* just a hole. */
				kfree(pages);
				return EXT_CONTINUE;
			}
			index = 0;

next_page:
			/* Try to find the 1st mapped buffer. */
			end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
				  blksize_bits;
			if (!page_has_buffers(pages[index]))
				goto out;
			head = page_buffers(pages[index]);
			if (!head)
				goto out;

			index++;
			bh = head;
			do {
				if (end >= newex->ec_block +
					newex->ec_len)
					/* The buffer is out of
					 * the request range.
					 */
					goto out;

				if (buffer_mapped(bh) &&
				    end >= newex->ec_block) {
					start_index = index - 1;
					/* get the 1st mapped buffer. */
					goto found_mapped_buffer;
				}

				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			/* No mapped buffer in the range found in this page,
			 * We need to look up next page.
			 */
			if (index >= ret) {
				/* There is no page left, but we need to limit
				 * newex->ec_len.
				 */
				newex->ec_len = end - newex->ec_block;
				goto out;
			}
			goto next_page;
		} else {
			/* Find contiguous delayed buffers. */
			if (ret > 0 && pages[0]->index == last_offset)
				head = page_buffers(pages[0]);
			bh = head;
			index = 1;
			start_index = 0;
		}

found_mapped_buffer:
		if (bh != NULL && buffer_delay(bh)) {
			/* 1st or contiguous delayed buffer found. */
			if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
				/*
				 * 1st delayed buffer found, record
				 * the start of extent.
				 */
				flags |= FIEMAP_EXTENT_DELALLOC;
				newex->ec_block = end;
				logical = (__u64)end << blksize_bits;
			}
			/* Find contiguous delayed buffers. */
			do {
				if (!buffer_delay(bh))
					goto found_delayed_extent;
				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			for (; index < ret; index++) {
				if (!page_has_buffers(pages[index])) {
					bh = NULL;
					break;
				}
				head = page_buffers(pages[index]);
				if (!head) {
					bh = NULL;
					break;
				}

				if (pages[index]->index !=
				    pages[start_index]->index + index
				    - start_index) {
					/* Blocks are not contiguous. */
					bh = NULL;
					break;
				}
				bh = head;
				do {
					if (!buffer_delay(bh))
						/* Delayed-extent ends. */
						goto found_delayed_extent;
					bh = bh->b_this_page;
					end++;
				} while (bh != head);
			}
		} else if (!(flags & FIEMAP_EXTENT_DELALLOC))
			/* a hole found. */
			goto out;

found_delayed_extent:
		newex->ec_len = min(end - newex->ec_block,
						(ext4_lblk_t)EXT_INIT_MAX_LEN);
		if (ret == nr_pages && bh != NULL &&
			newex->ec_len < EXT_INIT_MAX_LEN &&
			buffer_delay(bh)) {
			/* Have not collected an extent and continue. */
			for (index = 0; index < ret; index++)
				page_cache_release(pages[index]);
			goto repeat;
		}

		for (index = 0; index < ret; index++)
			page_cache_release(pages[index]);
		kfree(pages);
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length = (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	if (next == EXT_MAX_BLOCKS)
		flags |= FIEMAP_EXTENT_LAST;

	ret = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (ret < 0)
		return ret;
	if (ret == 1)
		return EXT_BREAK;
	return EXT_CONTINUE;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

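/*
 * Illustrative userspace FIEMAP call (a sketch, not part of the
 * kernel build; the extent count of 16 is arbitrary and error
 * handling is elided):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   16 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_flags = FIEMAP_FLAG_SYNC;
 *	fm->fm_extent_count = 16;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * FIEMAP_FLAG_SYNC asks the kernel to write dirty data back before
 * mapping, so delayed allocations are resolved before the extent walk.
 */
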
static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
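		/*
		 * For example (values vary by filesystem): with
		 * 256-byte inodes and i_extra_isize = 32, the in-inode
		 * xattr area starts 128 + 32 = 160 bytes into the
		 * inode, leaving 256 - 160 = 96 bytes for it.
		 */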
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

/*
 * ext4_ext_punch_hole
 *
 * Punches a hole of "length" bytes in a file starting
 * at byte "offset"
 *
 * @file:   The file to punch a hole in
 * @offset: The starting byte offset of the hole
 * @length: The length of the hole
 *
 * Returns the number of blocks removed or a negative value on error
 */
int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ext4_ext_cache cache_ex;
	ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
	struct address_space *mapping = inode->i_mapping;
	struct ext4_map_blocks map;
	handle_t *handle;
	loff_t first_page, last_page, page_len;
	loff_t first_page_offset, last_page_offset;
	int ret, credits, blocks_released, err = 0;

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		return 0;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
		   offset;
	}
	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	last_page = (offset + length) >> PAGE_CACHE_SHIFT;

	first_page_offset = first_page << PAGE_CACHE_SHIFT;
	last_page_offset = last_page << PAGE_CACHE_SHIFT;

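	/*
	 * Worked example (assuming 4KiB blocks and pages): punching
	 * offset = 1000, length = 10000 covers bytes 1000..10999.
	 * first_block = (1000 + 4095) >> 12 = 1 and last_block =
	 * 11000 >> 12 = 2, so only block 1 (bytes 4096..8191) lies
	 * entirely inside the hole and is removed below; the partial
	 * blocks at either end are only zeroed, not deallocated.
	 */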
	/*
	 * Write out all dirty pages to avoid race conditions
	 * Then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		err = filemap_write_and_wait_range(mapping,
			offset, offset + length - 1);

		if (err)
			return err;
	}

	/* Now release the pages */
	if (last_page_offset > first_page_offset) {
		truncate_inode_pages_range(mapping, first_page_offset,
					   last_page_offset - 1);
	}

	/* finish any pending end_io work */
	ext4_flush_completed_IO(inode);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_orphan_add(handle, inode);
	if (err)
		goto out;

	/*
	 * Now we need to zero out the non-page-aligned data in the
	 * pages at the start and tail of the hole, and unmap the buffer
	 * heads for the block aligned regions of the page that were
	 * completely zeroed.
	 */
	if (first_page > last_page) {
		/*
		 * If the file space being truncated is contained within a page
		 * just zero out and unmap the middle of that page
		 */
		err = ext4_discard_partial_page_buffers(handle,
			mapping, offset, length, 0);

		if (err)
			goto out;
	} else {
		/*
		 * zero out and unmap the partial page that contains
		 * the start of the hole
		 */
		page_len = first_page_offset - offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
						   offset, page_len, 0);
			if (err)
				goto out;
		}

		/*
		 * zero out and unmap the partial page that contains
		 * the end of the hole
		 */
		page_len = offset + length - last_page_offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
					last_page_offset, page_len, 0);
			if (err)
				goto out;
		}
	}

	/*
	 * If i_size is contained in the last page, we need to
	 * unmap and zero the partial page after i_size
	 */
	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
	   inode->i_size % PAGE_CACHE_SIZE != 0) {

		page_len = PAGE_CACHE_SIZE -
			(inode->i_size & (PAGE_CACHE_SIZE - 1));

		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle,
			  mapping, inode->i_size, page_len, 0);

			if (err)
				goto out;
		}
	}

	/* If there are no blocks to remove, return now */
	if (first_block >= last_block)
		goto out;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);
	ext4_discard_preallocations(inode);

	/*
	 * Loop over all the blocks and identify blocks
	 * that need to be punched out
	 */
	iblock = first_block;
	blocks_released = 0;
	while (iblock < last_block) {
		max_blocks = last_block - iblock;
		num_blocks = 1;
		memset(&map, 0, sizeof(map));
		map.m_lblk = iblock;
		map.m_len = max_blocks;
		ret = ext4_ext_map_blocks(handle, inode, &map,
			EXT4_GET_BLOCKS_PUNCH_OUT_EXT);

		if (ret > 0) {
			blocks_released += ret;
			num_blocks = ret;
		} else if (ret == 0) {
			/*
			 * If map blocks could not find the block,
			 * then it is in a hole.  If the hole was
			 * not already cached, then map blocks should
			 * put it in the cache.  So we can get the hole
			 * out of the cache
			 */
			memset(&cache_ex, 0, sizeof(cache_ex));
			if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
				!cache_ex.ec_start) {

				/* The hole is cached */
				num_blocks = cache_ex.ec_block +
					cache_ex.ec_len - iblock;
			} else {
				/* The block could not be identified */
				err = -EIO;
				break;
			}
		} else {
			/* Map blocks error */
			err = ret;
			break;
		}

		if (num_blocks == 0) {
			/* This condition should never happen */
			ext_debug("Block lookup failed");
			err = -EIO;
			break;
		}

		iblock += num_blocks;
	}

	if (blocks_released > 0) {
		ext4_ext_invalidate_cache(inode);
		ext4_discard_preallocations(inode);
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out:
	ext4_orphan_del(handle, inode);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}
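
/*
 * Illustrative userspace trigger for the function above (a sketch,
 * not part of the kernel build): punching out bytes [4096, 16384)
 * of an open file.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE, 4096, 12288);
 *
 * ext4_fallocate() hands FALLOC_FL_PUNCH_HOLE requests to
 * ext4_punch_hole(), which for extent-mapped files ends up here.
 */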
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS - 1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);