2
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
5
* This program is free software; you can redistribute it and/or
6
* modify it under the terms of the GNU General Public License as
7
* published by the Free Software Foundation.
9
* This program is distributed in the hope that it would be useful,
10
* but WITHOUT ANY WARRANTY; without even the implied warranty of
11
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
* GNU General Public License for more details.
14
* You should have received a copy of the GNU General Public License
15
* along with this program; if not, write the Free Software Foundation,
16
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20
#include "xfs_types.h"
24
#include "xfs_trans.h"
28
#include "xfs_da_btree.h"
29
#include "xfs_bmap_btree.h"
30
#include "xfs_alloc_btree.h"
31
#include "xfs_ialloc_btree.h"
32
#include "xfs_dinode.h"
33
#include "xfs_inode.h"
34
#include "xfs_btree.h"
35
#include "xfs_mount.h"
36
#include "xfs_itable.h"
37
#include "xfs_inode_item.h"
38
#include "xfs_extfree_item.h"
39
#include "xfs_alloc.h"
41
#include "xfs_rtalloc.h"
42
#include "xfs_error.h"
43
#include "xfs_attr_leaf.h"
45
#include "xfs_quota.h"
46
#include "xfs_trans_space.h"
47
#include "xfs_buf_item.h"
48
#include "xfs_filestream.h"
49
#include "xfs_vnodeops.h"
50
#include "xfs_trace.h"
53
kmem_zone_t *xfs_bmap_free_item_zone;
56
* Prototypes for internal bmap routines.
61
xfs_bmap_check_leaf_extents(
62
struct xfs_btree_cur *cur,
66
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
71
* Called from xfs_bmap_add_attrfork to handle extents format files.
73
STATIC int /* error */
74
xfs_bmap_add_attrfork_extents(
75
xfs_trans_t *tp, /* transaction pointer */
76
xfs_inode_t *ip, /* incore inode pointer */
77
xfs_fsblock_t *firstblock, /* first block allocated */
78
xfs_bmap_free_t *flist, /* blocks to free at commit */
79
int *flags); /* inode logging flags */
82
* Called from xfs_bmap_add_attrfork to handle local format files.
84
STATIC int /* error */
85
xfs_bmap_add_attrfork_local(
86
xfs_trans_t *tp, /* transaction pointer */
87
xfs_inode_t *ip, /* incore inode pointer */
88
xfs_fsblock_t *firstblock, /* first block allocated */
89
xfs_bmap_free_t *flist, /* blocks to free at commit */
90
int *flags); /* inode logging flags */
93
* xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
94
* It figures out where to ask the underlying allocator to put the new extent.
96
STATIC int /* error */
98
xfs_bmalloca_t *ap); /* bmap alloc argument struct */
101
* Transform a btree format file with only one leaf node, where the
102
* extents list will fit in the inode, into an extents format file.
103
* Since the file extents are already in-core, all we have to do is
104
* give up the space for the btree root and pitch the leaf block.
106
STATIC int /* error */
107
xfs_bmap_btree_to_extents(
108
xfs_trans_t *tp, /* transaction pointer */
109
xfs_inode_t *ip, /* incore inode pointer */
110
xfs_btree_cur_t *cur, /* btree cursor */
111
int *logflagsp, /* inode logging flags */
112
int whichfork); /* data or attr fork */
115
* Remove the entry "free" from the free item list. Prev points to the
116
* previous entry, unless "free" is the head of the list.
120
xfs_bmap_free_t *flist, /* free item list header */
121
xfs_bmap_free_item_t *prev, /* previous item on list, if any */
122
xfs_bmap_free_item_t *free); /* list item to be freed */
125
* Convert an extents-format file into a btree-format file.
126
* The new file will have a root block (in the inode) and a single child block.
128
STATIC int /* error */
129
xfs_bmap_extents_to_btree(
130
xfs_trans_t *tp, /* transaction pointer */
131
xfs_inode_t *ip, /* incore inode pointer */
132
xfs_fsblock_t *firstblock, /* first-block-allocated */
133
xfs_bmap_free_t *flist, /* blocks freed in xaction */
134
xfs_btree_cur_t **curp, /* cursor returned to caller */
135
int wasdel, /* converting a delayed alloc */
136
int *logflagsp, /* inode logging flags */
137
int whichfork); /* data or attr fork */
140
* Convert a local file to an extents file.
141
* This code is sort of bogus, since the file data needs to get
142
* logged so it won't be lost. The bmap-level manipulations are ok, though.
144
STATIC int /* error */
145
xfs_bmap_local_to_extents(
146
xfs_trans_t *tp, /* transaction pointer */
147
xfs_inode_t *ip, /* incore inode pointer */
148
xfs_fsblock_t *firstblock, /* first block allocated in xaction */
149
xfs_extlen_t total, /* total blocks needed by transaction */
150
int *logflagsp, /* inode logging flags */
151
int whichfork); /* data or attr fork */
154
* Search the extents list for the inode, for the extent containing bno.
155
* If bno lies in a hole, point to the next entry. If bno lies past eof,
156
* *eofp will be set, and *prevp will contain the last entry (null if none).
157
* Else, *lastxp will be set to the index of the found
158
* entry; *gotp will contain the entry.
160
STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
161
xfs_bmap_search_extents(
162
xfs_inode_t *ip, /* incore inode pointer */
163
xfs_fileoff_t bno, /* block number searched for */
164
int whichfork, /* data or attr fork */
165
int *eofp, /* out: end of file found */
166
xfs_extnum_t *lastxp, /* out: last extent index */
167
xfs_bmbt_irec_t *gotp, /* out: extent entry found */
168
xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */
171
* Compute the worst-case number of indirect blocks that will be used
172
* for ip's delayed extent of length "len".
175
xfs_bmap_worst_indlen(
176
xfs_inode_t *ip, /* incore inode pointer */
177
xfs_filblks_t len); /* delayed extent length */
181
* Perform various validation checks on the values being returned
185
xfs_bmap_validate_ret(
189
xfs_bmbt_irec_t *mval,
193
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
201
xfs_fsblock_t blockno,
206
xfs_bmap_count_leaves(
213
xfs_bmap_disk_count_leaves(
214
struct xfs_mount *mp,
215
struct xfs_btree_block *block,
220
* Bmap internal routines.
223
STATIC int /* error */
225
struct xfs_btree_cur *cur,
229
int *stat) /* success/failure */
231
cur->bc_rec.b.br_startoff = off;
232
cur->bc_rec.b.br_startblock = bno;
233
cur->bc_rec.b.br_blockcount = len;
234
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
237
STATIC int /* error */
239
struct xfs_btree_cur *cur,
243
int *stat) /* success/failure */
245
cur->bc_rec.b.br_startoff = off;
246
cur->bc_rec.b.br_startblock = bno;
247
cur->bc_rec.b.br_blockcount = len;
248
return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
252
* Update the record referred to by cur to the value given
253
* by [off, bno, len, state].
254
* This either works (return 0) or gets an EFSCORRUPTED error.
258
struct xfs_btree_cur *cur,
264
union xfs_btree_rec rec;
266
xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
267
return xfs_btree_update(cur, &rec);
271
* Called from xfs_bmap_add_attrfork to handle btree format files.
273
STATIC int /* error */
274
xfs_bmap_add_attrfork_btree(
275
xfs_trans_t *tp, /* transaction pointer */
276
xfs_inode_t *ip, /* incore inode pointer */
277
xfs_fsblock_t *firstblock, /* first block allocated */
278
xfs_bmap_free_t *flist, /* blocks to free at commit */
279
int *flags) /* inode logging flags */
281
xfs_btree_cur_t *cur; /* btree cursor */
282
int error; /* error return value */
283
xfs_mount_t *mp; /* file system mount struct */
284
int stat; /* newroot status */
287
if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
288
*flags |= XFS_ILOG_DBROOT;
290
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
291
cur->bc_private.b.flist = flist;
292
cur->bc_private.b.firstblock = *firstblock;
293
if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
295
/* must be at least one entry */
296
XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
297
if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
300
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
301
return XFS_ERROR(ENOSPC);
303
*firstblock = cur->bc_private.b.firstblock;
304
cur->bc_private.b.allocated = 0;
305
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
309
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
314
* Called from xfs_bmap_add_attrfork to handle extents format files.
316
STATIC int /* error */
317
xfs_bmap_add_attrfork_extents(
318
xfs_trans_t *tp, /* transaction pointer */
319
xfs_inode_t *ip, /* incore inode pointer */
320
xfs_fsblock_t *firstblock, /* first block allocated */
321
xfs_bmap_free_t *flist, /* blocks to free at commit */
322
int *flags) /* inode logging flags */
324
xfs_btree_cur_t *cur; /* bmap btree cursor */
325
int error; /* error return value */
327
if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
330
error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
331
flags, XFS_DATA_FORK);
333
cur->bc_private.b.allocated = 0;
334
xfs_btree_del_cursor(cur,
335
error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
341
* Called from xfs_bmap_add_attrfork to handle local format files.
343
STATIC int /* error */
344
xfs_bmap_add_attrfork_local(
345
xfs_trans_t *tp, /* transaction pointer */
346
xfs_inode_t *ip, /* incore inode pointer */
347
xfs_fsblock_t *firstblock, /* first block allocated */
348
xfs_bmap_free_t *flist, /* blocks to free at commit */
349
int *flags) /* inode logging flags */
351
xfs_da_args_t dargs; /* args for dir/attr code */
352
int error; /* error return value */
353
xfs_mount_t *mp; /* mount structure pointer */
355
if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
357
if (S_ISDIR(ip->i_d.di_mode)) {
359
memset(&dargs, 0, sizeof(dargs));
361
dargs.firstblock = firstblock;
363
dargs.total = mp->m_dirblkfsbs;
364
dargs.whichfork = XFS_DATA_FORK;
366
error = xfs_dir2_sf_to_block(&dargs);
368
error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
374
* Convert a delayed allocation to a real allocation.
376
STATIC int /* error */
377
xfs_bmap_add_extent_delay_real(
378
struct xfs_bmalloca *bma)
380
struct xfs_bmbt_irec *new = &bma->got;
381
int diff; /* temp value */
382
xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
383
int error; /* error return value */
384
int i; /* temp state */
385
xfs_ifork_t *ifp; /* inode fork pointer */
386
xfs_fileoff_t new_endoff; /* end offset of new entry */
387
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
388
/* left is 0, right is 1, prev is 2 */
389
int rval=0; /* return value (logging flags) */
390
int state = 0;/* state bits, accessed thru macros */
391
xfs_filblks_t da_new; /* new count del alloc blocks used */
392
xfs_filblks_t da_old; /* old count del alloc blocks used */
393
xfs_filblks_t temp=0; /* value for da_new calculations */
394
xfs_filblks_t temp2=0;/* value for da_new calculations */
395
int tmp_rval; /* partial logging flags */
397
ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
399
ASSERT(bma->idx >= 0);
400
ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
401
ASSERT(!isnullstartblock(new->br_startblock));
403
(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
405
XFS_STATS_INC(xs_add_exlist);
412
* Set up a bunch of variables to make the tests simpler.
414
ep = xfs_iext_get_ext(ifp, bma->idx);
415
xfs_bmbt_get_all(ep, &PREV);
416
new_endoff = new->br_startoff + new->br_blockcount;
417
ASSERT(PREV.br_startoff <= new->br_startoff);
418
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
420
da_old = startblockval(PREV.br_startblock);
424
* Set flags determining what part of the previous delayed allocation
425
* extent is being replaced by a real allocation.
427
if (PREV.br_startoff == new->br_startoff)
428
state |= BMAP_LEFT_FILLING;
429
if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
430
state |= BMAP_RIGHT_FILLING;
433
* Check and set flags if this segment has a left neighbor.
434
* Don't set contiguous if the combined extent would be too large.
437
state |= BMAP_LEFT_VALID;
438
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
440
if (isnullstartblock(LEFT.br_startblock))
441
state |= BMAP_LEFT_DELAY;
444
if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
445
LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
446
LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
447
LEFT.br_state == new->br_state &&
448
LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
449
state |= BMAP_LEFT_CONTIG;
452
* Check and set flags if this segment has a right neighbor.
453
* Don't set contiguous if the combined extent would be too large.
454
* Also check for all-three-contiguous being too large.
456
if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
457
state |= BMAP_RIGHT_VALID;
458
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
460
if (isnullstartblock(RIGHT.br_startblock))
461
state |= BMAP_RIGHT_DELAY;
464
if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
465
new_endoff == RIGHT.br_startoff &&
466
new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
467
new->br_state == RIGHT.br_state &&
468
new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
469
((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
470
BMAP_RIGHT_FILLING)) !=
471
(BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
472
BMAP_RIGHT_FILLING) ||
473
LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
475
state |= BMAP_RIGHT_CONTIG;
479
* Switch out based on the FILLING and CONTIG state bits.
481
switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
482
BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
483
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
484
BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
486
* Filling in all of a previously delayed allocation extent.
487
* The left and right neighbors are both contiguous with new.
490
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
491
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
492
LEFT.br_blockcount + PREV.br_blockcount +
493
RIGHT.br_blockcount);
494
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
496
xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
497
bma->ip->i_d.di_nextents--;
498
if (bma->cur == NULL)
499
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
501
rval = XFS_ILOG_CORE;
502
error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
504
RIGHT.br_blockcount, &i);
507
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
508
error = xfs_btree_delete(bma->cur, &i);
511
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
512
error = xfs_btree_decrement(bma->cur, 0, &i);
515
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
516
error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
520
RIGHT.br_blockcount, LEFT.br_state);
526
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
528
* Filling in all of a previously delayed allocation extent.
529
* The left neighbor is contiguous, the right is not.
533
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
534
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
535
LEFT.br_blockcount + PREV.br_blockcount);
536
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
538
xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
539
if (bma->cur == NULL)
540
rval = XFS_ILOG_DEXT;
543
error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
544
LEFT.br_startblock, LEFT.br_blockcount,
548
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
549
error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
552
PREV.br_blockcount, LEFT.br_state);
558
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
560
* Filling in all of a previously delayed allocation extent.
561
* The right neighbor is contiguous, the left is not.
563
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
564
xfs_bmbt_set_startblock(ep, new->br_startblock);
565
xfs_bmbt_set_blockcount(ep,
566
PREV.br_blockcount + RIGHT.br_blockcount);
567
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
569
xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
570
if (bma->cur == NULL)
571
rval = XFS_ILOG_DEXT;
574
error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
576
RIGHT.br_blockcount, &i);
579
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
580
error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
583
RIGHT.br_blockcount, PREV.br_state);
589
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
591
* Filling in all of a previously delayed allocation extent.
592
* Neither the left nor right neighbors are contiguous with
595
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
596
xfs_bmbt_set_startblock(ep, new->br_startblock);
597
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
599
bma->ip->i_d.di_nextents++;
600
if (bma->cur == NULL)
601
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
603
rval = XFS_ILOG_CORE;
604
error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
605
new->br_startblock, new->br_blockcount,
609
XFS_WANT_CORRUPTED_GOTO(i == 0, done);
610
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
611
error = xfs_btree_insert(bma->cur, &i);
614
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
618
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
620
* Filling in the first part of a previous delayed allocation.
621
* The left neighbor is contiguous.
623
trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
624
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
625
LEFT.br_blockcount + new->br_blockcount);
626
xfs_bmbt_set_startoff(ep,
627
PREV.br_startoff + new->br_blockcount);
628
trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
630
temp = PREV.br_blockcount - new->br_blockcount;
631
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
632
xfs_bmbt_set_blockcount(ep, temp);
633
if (bma->cur == NULL)
634
rval = XFS_ILOG_DEXT;
637
error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
638
LEFT.br_startblock, LEFT.br_blockcount,
642
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
643
error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
651
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
652
startblockval(PREV.br_startblock));
653
xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
654
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
659
case BMAP_LEFT_FILLING:
661
* Filling in the first part of a previous delayed allocation.
662
* The left neighbor is not contiguous.
664
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
665
xfs_bmbt_set_startoff(ep, new_endoff);
666
temp = PREV.br_blockcount - new->br_blockcount;
667
xfs_bmbt_set_blockcount(ep, temp);
668
xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
669
bma->ip->i_d.di_nextents++;
670
if (bma->cur == NULL)
671
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
673
rval = XFS_ILOG_CORE;
674
error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
675
new->br_startblock, new->br_blockcount,
679
XFS_WANT_CORRUPTED_GOTO(i == 0, done);
680
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
681
error = xfs_btree_insert(bma->cur, &i);
684
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
686
if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
687
bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
688
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
689
bma->firstblock, bma->flist,
690
&bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
695
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
696
startblockval(PREV.br_startblock) -
697
(bma->cur ? bma->cur->bc_private.b.allocated : 0));
698
ep = xfs_iext_get_ext(ifp, bma->idx + 1);
699
xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
700
trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
703
case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
705
* Filling in the last part of a previous delayed allocation.
706
* The right neighbor is contiguous with the new allocation.
708
temp = PREV.br_blockcount - new->br_blockcount;
709
trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
710
xfs_bmbt_set_blockcount(ep, temp);
711
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
712
new->br_startoff, new->br_startblock,
713
new->br_blockcount + RIGHT.br_blockcount,
715
trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
716
if (bma->cur == NULL)
717
rval = XFS_ILOG_DEXT;
720
error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
722
RIGHT.br_blockcount, &i);
725
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
726
error = xfs_bmbt_update(bma->cur, new->br_startoff,
735
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
736
startblockval(PREV.br_startblock));
737
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
738
xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
739
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
744
case BMAP_RIGHT_FILLING:
746
* Filling in the last part of a previous delayed allocation.
747
* The right neighbor is not contiguous.
749
temp = PREV.br_blockcount - new->br_blockcount;
750
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
751
xfs_bmbt_set_blockcount(ep, temp);
752
xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
753
bma->ip->i_d.di_nextents++;
754
if (bma->cur == NULL)
755
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
757
rval = XFS_ILOG_CORE;
758
error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
759
new->br_startblock, new->br_blockcount,
763
XFS_WANT_CORRUPTED_GOTO(i == 0, done);
764
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
765
error = xfs_btree_insert(bma->cur, &i);
768
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
770
if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
771
bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
772
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
773
bma->firstblock, bma->flist, &bma->cur, 1,
774
&tmp_rval, XFS_DATA_FORK);
779
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
780
startblockval(PREV.br_startblock) -
781
(bma->cur ? bma->cur->bc_private.b.allocated : 0));
782
ep = xfs_iext_get_ext(ifp, bma->idx);
783
xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
784
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
791
* Filling in the middle part of a previous delayed allocation.
792
* Contiguity is impossible here.
793
* This case is avoided almost all the time.
795
* We start with a delayed allocation:
797
* +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
800
* and we are allocating:
801
* +rrrrrrrrrrrrrrrrr+
804
* and we set it up for insertion as:
805
* +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
807
* PREV @ idx LEFT RIGHT
808
* inserted at idx + 1
810
temp = new->br_startoff - PREV.br_startoff;
811
temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
812
trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
813
xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
815
RIGHT.br_state = PREV.br_state;
816
RIGHT.br_startblock = nullstartblock(
817
(int)xfs_bmap_worst_indlen(bma->ip, temp2));
818
RIGHT.br_startoff = new_endoff;
819
RIGHT.br_blockcount = temp2;
820
/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
821
xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
822
bma->ip->i_d.di_nextents++;
823
if (bma->cur == NULL)
824
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
826
rval = XFS_ILOG_CORE;
827
error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
828
new->br_startblock, new->br_blockcount,
832
XFS_WANT_CORRUPTED_GOTO(i == 0, done);
833
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
834
error = xfs_btree_insert(bma->cur, &i);
837
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
839
if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
840
bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
841
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
842
bma->firstblock, bma->flist, &bma->cur,
843
1, &tmp_rval, XFS_DATA_FORK);
848
temp = xfs_bmap_worst_indlen(bma->ip, temp);
849
temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
850
diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
851
(bma->cur ? bma->cur->bc_private.b.allocated : 0));
853
error = xfs_icsb_modify_counters(bma->ip->i_mount,
855
-((int64_t)diff), 0);
861
ep = xfs_iext_get_ext(ifp, bma->idx);
862
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
863
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
864
trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
865
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
866
nullstartblock((int)temp2));
867
trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
870
da_new = temp + temp2;
873
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
874
case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
875
case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
876
case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
877
case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
878
case BMAP_LEFT_CONTIG:
879
case BMAP_RIGHT_CONTIG:
881
* These cases are all impossible.
886
/* convert to a btree if necessary */
887
if (XFS_IFORK_FORMAT(bma->ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
888
XFS_IFORK_NEXTENTS(bma->ip, XFS_DATA_FORK) > ifp->if_ext_max) {
889
int tmp_logflags; /* partial log flag return val */
891
ASSERT(bma->cur == NULL);
892
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
893
bma->firstblock, bma->flist, &bma->cur,
894
da_old > 0, &tmp_logflags, XFS_DATA_FORK);
895
bma->logflags |= tmp_logflags;
900
/* adjust for changes in reserved delayed indirect blocks */
901
if (da_old || da_new) {
904
temp += bma->cur->bc_private.b.allocated;
905
ASSERT(temp <= da_old);
907
xfs_icsb_modify_counters(bma->ip->i_mount,
909
(int64_t)(da_old - temp), 0);
912
/* clear out the allocated field, done with it now in any case. */
914
bma->cur->bc_private.b.allocated = 0;
916
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
918
bma->logflags |= rval;
926
* Convert an unwritten allocation to a real allocation or vice versa.
928
STATIC int /* error */
929
xfs_bmap_add_extent_unwritten_real(
930
struct xfs_trans *tp,
931
xfs_inode_t *ip, /* incore inode pointer */
932
xfs_extnum_t *idx, /* extent number to update/insert */
933
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
934
xfs_bmbt_irec_t *new, /* new data to add to file extents */
935
xfs_fsblock_t *first, /* pointer to firstblock variable */
936
xfs_bmap_free_t *flist, /* list of extents to be freed */
937
int *logflagsp) /* inode logging flags */
939
xfs_btree_cur_t *cur; /* btree cursor */
940
xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
941
int error; /* error return value */
942
int i; /* temp state */
943
xfs_ifork_t *ifp; /* inode fork pointer */
944
xfs_fileoff_t new_endoff; /* end offset of new entry */
945
xfs_exntst_t newext; /* new extent state */
946
xfs_exntst_t oldext; /* old extent state */
947
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
948
/* left is 0, right is 1, prev is 2 */
949
int rval=0; /* return value (logging flags) */
950
int state = 0;/* state bits, accessed thru macros */
955
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
958
ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
959
ASSERT(!isnullstartblock(new->br_startblock));
961
XFS_STATS_INC(xs_add_exlist);
968
* Set up a bunch of variables to make the tests simpler.
971
ep = xfs_iext_get_ext(ifp, *idx);
972
xfs_bmbt_get_all(ep, &PREV);
973
newext = new->br_state;
974
oldext = (newext == XFS_EXT_UNWRITTEN) ?
975
XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
976
ASSERT(PREV.br_state == oldext);
977
new_endoff = new->br_startoff + new->br_blockcount;
978
ASSERT(PREV.br_startoff <= new->br_startoff);
979
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
982
* Set flags determining what part of the previous oldext allocation
983
* extent is being replaced by a newext allocation.
985
if (PREV.br_startoff == new->br_startoff)
986
state |= BMAP_LEFT_FILLING;
987
if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
988
state |= BMAP_RIGHT_FILLING;
991
* Check and set flags if this segment has a left neighbor.
992
* Don't set contiguous if the combined extent would be too large.
995
state |= BMAP_LEFT_VALID;
996
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
998
if (isnullstartblock(LEFT.br_startblock))
999
state |= BMAP_LEFT_DELAY;
1002
if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1003
LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1004
LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1005
LEFT.br_state == newext &&
1006
LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1007
state |= BMAP_LEFT_CONTIG;
1010
* Check and set flags if this segment has a right neighbor.
1011
* Don't set contiguous if the combined extent would be too large.
1012
* Also check for all-three-contiguous being too large.
1014
if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1015
state |= BMAP_RIGHT_VALID;
1016
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
1017
if (isnullstartblock(RIGHT.br_startblock))
1018
state |= BMAP_RIGHT_DELAY;
1021
if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1022
new_endoff == RIGHT.br_startoff &&
1023
new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1024
newext == RIGHT.br_state &&
1025
new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1026
((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1027
BMAP_RIGHT_FILLING)) !=
1028
(BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1029
BMAP_RIGHT_FILLING) ||
1030
LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1032
state |= BMAP_RIGHT_CONTIG;
1035
* Switch out based on the FILLING and CONTIG state bits.
1037
switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1038
BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1039
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1040
BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1042
* Setting all of a previous oldext extent to newext.
1043
* The left and right neighbors are both contiguous with new.
1047
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1048
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1049
LEFT.br_blockcount + PREV.br_blockcount +
1050
RIGHT.br_blockcount);
1051
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1053
xfs_iext_remove(ip, *idx + 1, 2, state);
1054
ip->i_d.di_nextents -= 2;
1056
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1058
rval = XFS_ILOG_CORE;
1059
if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1060
RIGHT.br_startblock,
1061
RIGHT.br_blockcount, &i)))
1063
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1064
if ((error = xfs_btree_delete(cur, &i)))
1066
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1067
if ((error = xfs_btree_decrement(cur, 0, &i)))
1069
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1070
if ((error = xfs_btree_delete(cur, &i)))
1072
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1073
if ((error = xfs_btree_decrement(cur, 0, &i)))
1075
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1076
if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1078
LEFT.br_blockcount + PREV.br_blockcount +
1079
RIGHT.br_blockcount, LEFT.br_state)))
1084
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1086
* Setting all of a previous oldext extent to newext.
1087
* The left neighbor is contiguous, the right is not.
1091
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1092
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1093
LEFT.br_blockcount + PREV.br_blockcount);
1094
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1096
xfs_iext_remove(ip, *idx + 1, 1, state);
1097
ip->i_d.di_nextents--;
1099
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1101
rval = XFS_ILOG_CORE;
1102
if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1103
PREV.br_startblock, PREV.br_blockcount,
1106
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1107
if ((error = xfs_btree_delete(cur, &i)))
1109
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1110
if ((error = xfs_btree_decrement(cur, 0, &i)))
1112
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1113
if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1115
LEFT.br_blockcount + PREV.br_blockcount,
1121
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1123
* Setting all of a previous oldext extent to newext.
1124
* The right neighbor is contiguous, the left is not.
1126
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1127
xfs_bmbt_set_blockcount(ep,
1128
PREV.br_blockcount + RIGHT.br_blockcount);
1129
xfs_bmbt_set_state(ep, newext);
1130
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1131
xfs_iext_remove(ip, *idx + 1, 1, state);
1132
ip->i_d.di_nextents--;
1134
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1136
rval = XFS_ILOG_CORE;
1137
if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1138
RIGHT.br_startblock,
1139
RIGHT.br_blockcount, &i)))
1141
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1142
if ((error = xfs_btree_delete(cur, &i)))
1144
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1145
if ((error = xfs_btree_decrement(cur, 0, &i)))
1147
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1148
if ((error = xfs_bmbt_update(cur, new->br_startoff,
1150
new->br_blockcount + RIGHT.br_blockcount,
1156
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1158
* Setting all of a previous oldext extent to newext.
1159
* Neither the left nor right neighbors are contiguous with
1162
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1163
xfs_bmbt_set_state(ep, newext);
1164
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1167
rval = XFS_ILOG_DEXT;
1170
if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1171
new->br_startblock, new->br_blockcount,
1174
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1175
if ((error = xfs_bmbt_update(cur, new->br_startoff,
1176
new->br_startblock, new->br_blockcount,
1182
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1184
* Setting the first part of a previous oldext extent to newext.
1185
* The left neighbor is contiguous.
1187
trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
1188
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
1189
LEFT.br_blockcount + new->br_blockcount);
1190
xfs_bmbt_set_startoff(ep,
1191
PREV.br_startoff + new->br_blockcount);
1192
trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
1194
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1195
xfs_bmbt_set_startblock(ep,
1196
new->br_startblock + new->br_blockcount);
1197
xfs_bmbt_set_blockcount(ep,
1198
PREV.br_blockcount - new->br_blockcount);
1199
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1204
rval = XFS_ILOG_DEXT;
1207
if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1208
PREV.br_startblock, PREV.br_blockcount,
1211
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1212
if ((error = xfs_bmbt_update(cur,
1213
PREV.br_startoff + new->br_blockcount,
1214
PREV.br_startblock + new->br_blockcount,
1215
PREV.br_blockcount - new->br_blockcount,
1218
if ((error = xfs_btree_decrement(cur, 0, &i)))
1220
error = xfs_bmbt_update(cur, LEFT.br_startoff,
1222
LEFT.br_blockcount + new->br_blockcount,
1229
case BMAP_LEFT_FILLING:
1231
* Setting the first part of a previous oldext extent to newext.
1232
* The left neighbor is not contiguous.
1234
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1235
ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1236
xfs_bmbt_set_startoff(ep, new_endoff);
1237
xfs_bmbt_set_blockcount(ep,
1238
PREV.br_blockcount - new->br_blockcount);
1239
xfs_bmbt_set_startblock(ep,
1240
new->br_startblock + new->br_blockcount);
1241
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1243
xfs_iext_insert(ip, *idx, 1, new, state);
1244
ip->i_d.di_nextents++;
1246
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1248
rval = XFS_ILOG_CORE;
1249
if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1250
PREV.br_startblock, PREV.br_blockcount,
1253
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1254
if ((error = xfs_bmbt_update(cur,
1255
PREV.br_startoff + new->br_blockcount,
1256
PREV.br_startblock + new->br_blockcount,
1257
PREV.br_blockcount - new->br_blockcount,
1260
cur->bc_rec.b = *new;
1261
if ((error = xfs_btree_insert(cur, &i)))
1263
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1267
case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1269
* Setting the last part of a previous oldext extent to newext.
1270
* The right neighbor is contiguous with the new allocation.
1272
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1273
xfs_bmbt_set_blockcount(ep,
1274
PREV.br_blockcount - new->br_blockcount);
1275
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1279
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1280
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
1281
new->br_startoff, new->br_startblock,
1282
new->br_blockcount + RIGHT.br_blockcount, newext);
1283
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1286
rval = XFS_ILOG_DEXT;
1289
if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1291
PREV.br_blockcount, &i)))
1293
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1294
if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1296
PREV.br_blockcount - new->br_blockcount,
1299
if ((error = xfs_btree_increment(cur, 0, &i)))
1301
if ((error = xfs_bmbt_update(cur, new->br_startoff,
1303
new->br_blockcount + RIGHT.br_blockcount,
1309
case BMAP_RIGHT_FILLING:
1311
* Setting the last part of a previous oldext extent to newext.
1312
* The right neighbor is not contiguous.
1314
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1315
xfs_bmbt_set_blockcount(ep,
1316
PREV.br_blockcount - new->br_blockcount);
1317
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1320
xfs_iext_insert(ip, *idx, 1, new, state);
1322
ip->i_d.di_nextents++;
1324
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1326
rval = XFS_ILOG_CORE;
1327
if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1328
PREV.br_startblock, PREV.br_blockcount,
1331
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1332
if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1334
PREV.br_blockcount - new->br_blockcount,
1337
if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1338
new->br_startblock, new->br_blockcount,
1341
XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1342
cur->bc_rec.b.br_state = XFS_EXT_NORM;
1343
if ((error = xfs_btree_insert(cur, &i)))
1345
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1351
* Setting the middle part of a previous oldext extent to
1352
* newext. Contiguity is impossible here.
1353
* One extent becomes three extents.
1355
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1356
xfs_bmbt_set_blockcount(ep,
1357
new->br_startoff - PREV.br_startoff);
1358
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1361
r[1].br_startoff = new_endoff;
1362
r[1].br_blockcount =
1363
PREV.br_startoff + PREV.br_blockcount - new_endoff;
1364
r[1].br_startblock = new->br_startblock + new->br_blockcount;
1365
r[1].br_state = oldext;
1368
xfs_iext_insert(ip, *idx, 2, &r[0], state);
1370
ip->i_d.di_nextents += 2;
1372
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1374
rval = XFS_ILOG_CORE;
1375
if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1376
PREV.br_startblock, PREV.br_blockcount,
1379
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1380
/* new right extent - oldext */
1381
if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1382
r[1].br_startblock, r[1].br_blockcount,
1385
/* new left extent - oldext */
1386
cur->bc_rec.b = PREV;
1387
cur->bc_rec.b.br_blockcount =
1388
new->br_startoff - PREV.br_startoff;
1389
if ((error = xfs_btree_insert(cur, &i)))
1391
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1393
* Reset the cursor to the position of the new extent
1394
* we are about to insert as we can't trust it after
1395
* the previous insert.
1397
if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1398
new->br_startblock, new->br_blockcount,
1401
XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1402
/* new middle extent - newext */
1403
cur->bc_rec.b.br_state = new->br_state;
1404
if ((error = xfs_btree_insert(cur, &i)))
1406
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1410
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1411
case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1412
case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1413
case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1414
case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1415
case BMAP_LEFT_CONTIG:
1416
case BMAP_RIGHT_CONTIG:
1418
* These cases are all impossible.
1423
/* convert to a btree if necessary */
1424
if (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
1425
XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > ifp->if_ext_max) {
1426
int tmp_logflags; /* partial log flag return val */
1428
ASSERT(cur == NULL);
1429
error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
1430
0, &tmp_logflags, XFS_DATA_FORK);
1431
*logflagsp |= tmp_logflags;
1436
/* clear out the allocated field, done with it now in any case. */
1438
cur->bc_private.b.allocated = 0;
1442
xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
1452
* Convert a hole to a delayed allocation.
1455
xfs_bmap_add_extent_hole_delay(
1456
xfs_inode_t *ip, /* incore inode pointer */
1457
xfs_extnum_t *idx, /* extent number to update/insert */
1458
xfs_bmbt_irec_t *new) /* new data to add to file extents */
1460
xfs_ifork_t *ifp; /* inode fork pointer */
1461
xfs_bmbt_irec_t left; /* left neighbor extent entry */
1462
xfs_filblks_t newlen=0; /* new indirect size */
1463
xfs_filblks_t oldlen=0; /* old indirect size */
1464
xfs_bmbt_irec_t right; /* right neighbor extent entry */
1465
int state; /* state bits, accessed thru macros */
1466
xfs_filblks_t temp=0; /* temp for indirect calculations */
1468
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1470
ASSERT(isnullstartblock(new->br_startblock));
1473
* Check and set flags if this segment has a left neighbor
1476
state |= BMAP_LEFT_VALID;
1477
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
1479
if (isnullstartblock(left.br_startblock))
1480
state |= BMAP_LEFT_DELAY;
1484
* Check and set flags if the current (right) segment exists.
1485
* If it doesn't exist, we're converting the hole at end-of-file.
1487
if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1488
state |= BMAP_RIGHT_VALID;
1489
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
1491
if (isnullstartblock(right.br_startblock))
1492
state |= BMAP_RIGHT_DELAY;
1496
* Set contiguity flags on the left and right neighbors.
1497
* Don't let extents get too large, even if the pieces are contiguous.
1499
if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
1500
left.br_startoff + left.br_blockcount == new->br_startoff &&
1501
left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1502
state |= BMAP_LEFT_CONTIG;
1504
if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
1505
new->br_startoff + new->br_blockcount == right.br_startoff &&
1506
new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1507
(!(state & BMAP_LEFT_CONTIG) ||
1508
(left.br_blockcount + new->br_blockcount +
1509
right.br_blockcount <= MAXEXTLEN)))
1510
state |= BMAP_RIGHT_CONTIG;
1513
* Switch out based on the contiguity flags.
1515
switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1516
case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1518
* New allocation is contiguous with delayed allocations
1519
* on the left and on the right.
1520
* Merge all three into a single extent record.
1523
temp = left.br_blockcount + new->br_blockcount +
1524
right.br_blockcount;
1526
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1527
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
1528
oldlen = startblockval(left.br_startblock) +
1529
startblockval(new->br_startblock) +
1530
startblockval(right.br_startblock);
1531
newlen = xfs_bmap_worst_indlen(ip, temp);
1532
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
1533
nullstartblock((int)newlen));
1534
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1536
xfs_iext_remove(ip, *idx + 1, 1, state);
1539
case BMAP_LEFT_CONTIG:
1541
* New allocation is contiguous with a delayed allocation
1543
* Merge the new allocation with the left neighbor.
1546
temp = left.br_blockcount + new->br_blockcount;
1548
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1549
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
1550
oldlen = startblockval(left.br_startblock) +
1551
startblockval(new->br_startblock);
1552
newlen = xfs_bmap_worst_indlen(ip, temp);
1553
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
1554
nullstartblock((int)newlen));
1555
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1558
case BMAP_RIGHT_CONTIG:
1560
* New allocation is contiguous with a delayed allocation
1562
* Merge the new allocation with the right neighbor.
1564
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1565
temp = new->br_blockcount + right.br_blockcount;
1566
oldlen = startblockval(new->br_startblock) +
1567
startblockval(right.br_startblock);
1568
newlen = xfs_bmap_worst_indlen(ip, temp);
1569
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
1571
nullstartblock((int)newlen), temp, right.br_state);
1572
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1577
* New allocation is not contiguous with another
1578
* delayed allocation.
1579
* Insert a new entry.
1581
oldlen = newlen = 0;
1582
xfs_iext_insert(ip, *idx, 1, new, state);
1585
if (oldlen != newlen) {
1586
ASSERT(oldlen > newlen);
1587
xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
1588
(int64_t)(oldlen - newlen), 0);
1590
* Nothing to do for disk quota accounting here.
1596
* Convert a hole to a real allocation.
1598
STATIC int /* error */
1599
xfs_bmap_add_extent_hole_real(
1600
struct xfs_bmalloca *bma,
1603
struct xfs_bmbt_irec *new = &bma->got;
1604
int error; /* error return value */
1605
int i; /* temp state */
1606
xfs_ifork_t *ifp; /* inode fork pointer */
1607
xfs_bmbt_irec_t left; /* left neighbor extent entry */
1608
xfs_bmbt_irec_t right; /* right neighbor extent entry */
1609
int rval=0; /* return value (logging flags) */
1610
int state; /* state bits, accessed thru macros */
1612
ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1614
ASSERT(bma->idx >= 0);
1615
ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
1616
ASSERT(!isnullstartblock(new->br_startblock));
1618
!(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1620
XFS_STATS_INC(xs_add_exlist);
1623
if (whichfork == XFS_ATTR_FORK)
1624
state |= BMAP_ATTRFORK;
1627
* Check and set flags if this segment has a left neighbor.
1630
state |= BMAP_LEFT_VALID;
1631
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
1632
if (isnullstartblock(left.br_startblock))
1633
state |= BMAP_LEFT_DELAY;
1637
* Check and set flags if this segment has a current value.
1638
* Not true if we're inserting into the "hole" at eof.
1640
if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1641
state |= BMAP_RIGHT_VALID;
1642
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
1643
if (isnullstartblock(right.br_startblock))
1644
state |= BMAP_RIGHT_DELAY;
1648
* We're inserting a real allocation between "left" and "right".
1649
* Set the contiguity flags. Don't let extents get too large.
1651
if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1652
left.br_startoff + left.br_blockcount == new->br_startoff &&
1653
left.br_startblock + left.br_blockcount == new->br_startblock &&
1654
left.br_state == new->br_state &&
1655
left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1656
state |= BMAP_LEFT_CONTIG;
1658
if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1659
new->br_startoff + new->br_blockcount == right.br_startoff &&
1660
new->br_startblock + new->br_blockcount == right.br_startblock &&
1661
new->br_state == right.br_state &&
1662
new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1663
(!(state & BMAP_LEFT_CONTIG) ||
1664
left.br_blockcount + new->br_blockcount +
1665
right.br_blockcount <= MAXEXTLEN))
1666
state |= BMAP_RIGHT_CONTIG;
1670
* Select which case we're in here, and implement it.
1672
switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1673
case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1675
* New allocation is contiguous with real allocations on the
1676
* left and on the right.
1677
* Merge all three into a single extent record.
1680
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1681
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1682
left.br_blockcount + new->br_blockcount +
1683
right.br_blockcount);
1684
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1686
xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1688
XFS_IFORK_NEXT_SET(bma->ip, whichfork,
1689
XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
1690
if (bma->cur == NULL) {
1691
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
1693
rval = XFS_ILOG_CORE;
1694
error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
1695
right.br_startblock, right.br_blockcount,
1699
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1700
error = xfs_btree_delete(bma->cur, &i);
1703
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1704
error = xfs_btree_decrement(bma->cur, 0, &i);
1707
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1708
error = xfs_bmbt_update(bma->cur, left.br_startoff,
1710
left.br_blockcount +
1711
new->br_blockcount +
1712
right.br_blockcount,
1719
case BMAP_LEFT_CONTIG:
1721
* New allocation is contiguous with a real allocation
1723
* Merge the new allocation with the left neighbor.
1726
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1727
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1728
left.br_blockcount + new->br_blockcount);
1729
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1731
if (bma->cur == NULL) {
1732
rval = xfs_ilog_fext(whichfork);
1735
error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
1736
left.br_startblock, left.br_blockcount,
1740
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1741
error = xfs_bmbt_update(bma->cur, left.br_startoff,
1743
left.br_blockcount +
1751
case BMAP_RIGHT_CONTIG:
1753
* New allocation is contiguous with a real allocation
1755
* Merge the new allocation with the right neighbor.
1757
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1758
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
1759
new->br_startoff, new->br_startblock,
1760
new->br_blockcount + right.br_blockcount,
1762
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1764
if (bma->cur == NULL) {
1765
rval = xfs_ilog_fext(whichfork);
1768
error = xfs_bmbt_lookup_eq(bma->cur,
1770
right.br_startblock,
1771
right.br_blockcount, &i);
1774
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1775
error = xfs_bmbt_update(bma->cur, new->br_startoff,
1777
new->br_blockcount +
1778
right.br_blockcount,
1787
* New allocation is not contiguous with another
1789
* Insert a new entry.
1791
xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
1792
XFS_IFORK_NEXT_SET(bma->ip, whichfork,
1793
XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
1794
if (bma->cur == NULL) {
1795
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
1797
rval = XFS_ILOG_CORE;
1798
error = xfs_bmbt_lookup_eq(bma->cur,
1801
new->br_blockcount, &i);
1804
XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1805
bma->cur->bc_rec.b.br_state = new->br_state;
1806
error = xfs_btree_insert(bma->cur, &i);
1809
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1814
/* convert to a btree if necessary */
1815
if (XFS_IFORK_FORMAT(bma->ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
1816
XFS_IFORK_NEXTENTS(bma->ip, whichfork) > ifp->if_ext_max) {
1817
int tmp_logflags; /* partial log flag return val */
1819
ASSERT(bma->cur == NULL);
1820
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1821
bma->firstblock, bma->flist, &bma->cur,
1822
0, &tmp_logflags, whichfork);
1823
bma->logflags |= tmp_logflags;
1828
/* clear out the allocated field, done with it now in any case. */
1830
bma->cur->bc_private.b.allocated = 0;
1832
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1834
bma->logflags |= rval;
1839
* Adjust the size of the new extent based on di_extsize and rt extsize.
1842
xfs_bmap_extsize_align(
1844
xfs_bmbt_irec_t *gotp, /* next extent pointer */
1845
xfs_bmbt_irec_t *prevp, /* previous extent pointer */
1846
xfs_extlen_t extsz, /* align to this extent size */
1847
int rt, /* is this a realtime inode? */
1848
int eof, /* is extent at end-of-file? */
1849
int delay, /* creating delalloc extent? */
1850
int convert, /* overwriting unwritten extent? */
1851
xfs_fileoff_t *offp, /* in/out: aligned offset */
1852
xfs_extlen_t *lenp) /* in/out: aligned length */
1854
xfs_fileoff_t orig_off; /* original offset */
1855
xfs_extlen_t orig_alen; /* original length */
1856
xfs_fileoff_t orig_end; /* original off+len */
1857
xfs_fileoff_t nexto; /* next file offset */
1858
xfs_fileoff_t prevo; /* previous file offset */
1859
xfs_fileoff_t align_off; /* temp for offset */
1860
xfs_extlen_t align_alen; /* temp for length */
1861
xfs_extlen_t temp; /* temp for calculations */
1866
orig_off = align_off = *offp;
1867
orig_alen = align_alen = *lenp;
1868
orig_end = orig_off + orig_alen;
1871
* If this request overlaps an existing extent, then don't
1872
* attempt to perform any additional alignment.
1874
if (!delay && !eof &&
1875
(orig_off >= gotp->br_startoff) &&
1876
(orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
1881
* If the file offset is unaligned vs. the extent size
1882
* we need to align it. This will be possible unless
1883
* the file was previously written with a kernel that didn't
1884
* perform this alignment, or if a truncate shot us in the
1887
temp = do_mod(orig_off, extsz);
1893
* Same adjustment for the end of the requested area.
1895
if ((temp = (align_alen % extsz))) {
1896
align_alen += extsz - temp;
1899
* If the previous block overlaps with this proposed allocation
1900
* then move the start forward without adjusting the length.
1902
if (prevp->br_startoff != NULLFILEOFF) {
1903
if (prevp->br_startblock == HOLESTARTBLOCK)
1904
prevo = prevp->br_startoff;
1906
prevo = prevp->br_startoff + prevp->br_blockcount;
1909
if (align_off != orig_off && align_off < prevo)
1912
* If the next block overlaps with this proposed allocation
1913
* then move the start back without adjusting the length,
1914
* but not before offset 0.
1915
* This may of course make the start overlap previous block,
1916
* and if we hit the offset 0 limit then the next block
1917
* can still overlap too.
1919
if (!eof && gotp->br_startoff != NULLFILEOFF) {
1920
if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
1921
(!delay && gotp->br_startblock == DELAYSTARTBLOCK))
1922
nexto = gotp->br_startoff + gotp->br_blockcount;
1924
nexto = gotp->br_startoff;
1926
nexto = NULLFILEOFF;
1928
align_off + align_alen != orig_end &&
1929
align_off + align_alen > nexto)
1930
align_off = nexto > align_alen ? nexto - align_alen : 0;
1932
* If we're now overlapping the next or previous extent that
1933
* means we can't fit an extsz piece in this hole. Just move
1934
* the start forward to the first valid spot and set
1935
* the length so we hit the end.
1937
if (align_off != orig_off && align_off < prevo)
1939
if (align_off + align_alen != orig_end &&
1940
align_off + align_alen > nexto &&
1941
nexto != NULLFILEOFF) {
1942
ASSERT(nexto > prevo);
1943
align_alen = nexto - align_off;
1947
* If realtime, and the result isn't a multiple of the realtime
1948
* extent size we need to remove blocks until it is.
1950
if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
1952
* We're not covering the original request, or
1953
* we won't be able to once we fix the length.
1955
if (orig_off < align_off ||
1956
orig_end > align_off + align_alen ||
1957
align_alen - temp < orig_alen)
1958
return XFS_ERROR(EINVAL);
1960
* Try to fix it by moving the start up.
1962
if (align_off + temp <= orig_off) {
1967
* Try to fix it by moving the end in.
1969
else if (align_off + align_alen - temp >= orig_end)
1972
* Set the start to the minimum then trim the length.
1975
align_alen -= orig_off - align_off;
1976
align_off = orig_off;
1977
align_alen -= align_alen % mp->m_sb.sb_rextsize;
1980
* Result doesn't cover the request, fail it.
1982
if (orig_off < align_off || orig_end > align_off + align_alen)
1983
return XFS_ERROR(EINVAL);
1985
ASSERT(orig_off >= align_off);
1986
ASSERT(orig_end <= align_off + align_alen);
1990
if (!eof && gotp->br_startoff != NULLFILEOFF)
1991
ASSERT(align_off + align_alen <= gotp->br_startoff);
1992
if (prevp->br_startoff != NULLFILEOFF)
1993
ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2001
#define XFS_ALLOC_GAP_UNITS 4
2005
xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2007
xfs_fsblock_t adjust; /* adjustment to block numbers */
2008
xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
2009
xfs_mount_t *mp; /* mount point structure */
2010
int nullfb; /* true if ap->firstblock isn't set */
2011
int rt; /* true if inode is realtime */
2013
#define ISVALID(x,y) \
2015
(x) < mp->m_sb.sb_rblocks : \
2016
XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2017
XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2018
XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
2020
mp = ap->ip->i_mount;
2021
nullfb = *ap->firstblock == NULLFSBLOCK;
2022
rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2023
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
2025
* If allocating at eof, and there's a previous real block,
2026
* try to use its last block as our starting point.
2028
if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
2029
!isnullstartblock(ap->prev.br_startblock) &&
2030
ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
2031
ap->prev.br_startblock)) {
2032
ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
2034
* Adjust for the gap between prevp and us.
2036
adjust = ap->offset -
2037
(ap->prev.br_startoff + ap->prev.br_blockcount);
2039
ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
2040
ap->blkno += adjust;
2043
* If not at eof, then compare the two neighbor blocks.
2044
* Figure out whether either one gives us a good starting point,
2045
* and pick the better one.
2047
else if (!ap->eof) {
2048
xfs_fsblock_t gotbno; /* right side block number */
2049
xfs_fsblock_t gotdiff=0; /* right side difference */
2050
xfs_fsblock_t prevbno; /* left side block number */
2051
xfs_fsblock_t prevdiff=0; /* left side difference */
2054
* If there's a previous (left) block, select a requested
2055
* start block based on it.
2057
if (ap->prev.br_startoff != NULLFILEOFF &&
2058
!isnullstartblock(ap->prev.br_startblock) &&
2059
(prevbno = ap->prev.br_startblock +
2060
ap->prev.br_blockcount) &&
2061
ISVALID(prevbno, ap->prev.br_startblock)) {
2063
* Calculate gap to end of previous block.
2065
adjust = prevdiff = ap->offset -
2066
(ap->prev.br_startoff +
2067
ap->prev.br_blockcount);
2069
* Figure the startblock based on the previous block's
2070
* end and the gap size.
2072
* If the gap is large relative to the piece we're
2073
* allocating, or using it gives us an invalid block
2074
* number, then just use the end of the previous block.
2076
if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
2077
ISVALID(prevbno + prevdiff,
2078
ap->prev.br_startblock))
2083
* If the firstblock forbids it, can't use it,
2086
if (!rt && !nullfb &&
2087
XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2088
prevbno = NULLFSBLOCK;
2091
* No previous block or can't follow it, just default.
2094
prevbno = NULLFSBLOCK;
2096
* If there's a following (right) block, select a requested
2097
* start block based on it.
2099
if (!isnullstartblock(ap->got.br_startblock)) {
2101
* Calculate gap to start of next block.
2103
adjust = gotdiff = ap->got.br_startoff - ap->offset;
2105
* Figure the startblock based on the next block's
2106
* start and the gap size.
2108
gotbno = ap->got.br_startblock;
2111
* If the gap is large relative to the piece we're
2112
* allocating, or using it gives us an invalid block
2113
* number, then just use the start of the next block
2114
* offset by our length.
2116
if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
2117
ISVALID(gotbno - gotdiff, gotbno))
2119
else if (ISVALID(gotbno - ap->length, gotbno)) {
2120
gotbno -= ap->length;
2121
gotdiff += adjust - ap->length;
2125
* If the firstblock forbids it, can't use it,
2128
if (!rt && !nullfb &&
2129
XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2130
gotbno = NULLFSBLOCK;
2133
* No next block, just default.
2136
gotbno = NULLFSBLOCK;
2138
* If both valid, pick the better one, else the only good
2139
* one, else ap->blkno is already set (to 0 or the inode block).
2141
if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2142
ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
2143
else if (prevbno != NULLFSBLOCK)
2144
ap->blkno = prevbno;
2145
else if (gotbno != NULLFSBLOCK)
2153
xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2155
xfs_alloctype_t atype = 0; /* type for allocation routines */
2156
int error; /* error return value */
2157
xfs_mount_t *mp; /* mount point structure */
2158
xfs_extlen_t prod = 0; /* product factor for allocators */
2159
xfs_extlen_t ralen = 0; /* realtime allocation length */
2160
xfs_extlen_t align; /* minimum allocation alignment */
2163
mp = ap->ip->i_mount;
2164
align = xfs_get_extsz_hint(ap->ip);
2165
prod = align / mp->m_sb.sb_rextsize;
2166
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
2167
align, 1, ap->eof, 0,
2168
ap->conv, &ap->offset, &ap->length);
2172
ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
2175
* If the offset & length are not perfectly aligned
2176
* then kill prod, it will just get us in trouble.
2178
if (do_mod(ap->offset, align) || ap->length % align)
2181
* Set ralen to be the actual requested length in rtextents.
2183
ralen = ap->length / mp->m_sb.sb_rextsize;
2185
* If the old value was close enough to MAXEXTLEN that
2186
* we rounded up to it, cut it back so it's valid again.
2187
* Note that if it's a really large request (bigger than
2188
* MAXEXTLEN), we don't hear about that number, and can't
2189
* adjust the starting point to match it.
2191
if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2192
ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2195
* Lock out other modifications to the RT bitmap inode.
2197
xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
2198
xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
2201
* If it's an allocation to an empty file at offset 0,
2202
* pick an extent that will space things out in the rt area.
2204
if (ap->eof && ap->offset == 0) {
2205
xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
2207
error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2210
ap->blkno = rtx * mp->m_sb.sb_rextsize;
2215
xfs_bmap_adjacent(ap);
2218
* Realtime allocation, done through xfs_rtallocate_extent.
2220
atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
2221
do_div(ap->blkno, mp->m_sb.sb_rextsize);
2224
if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
2225
&ralen, atype, ap->wasdel, prod, &rtb)))
2227
if (rtb == NULLFSBLOCK && prod > 1 &&
2228
(error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
2229
ap->length, &ralen, atype,
2230
ap->wasdel, 1, &rtb)))
2233
if (ap->blkno != NULLFSBLOCK) {
2234
ap->blkno *= mp->m_sb.sb_rextsize;
2235
ralen *= mp->m_sb.sb_rextsize;
2237
ap->ip->i_d.di_nblocks += ralen;
2238
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2240
ap->ip->i_delayed_blks -= ralen;
2242
* Adjust the disk quota also. This was reserved
2245
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2246
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2247
XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2255
xfs_bmap_btalloc_nullfb(
2256
struct xfs_bmalloca *ap,
2257
struct xfs_alloc_arg *args,
2260
struct xfs_mount *mp = ap->ip->i_mount;
2261
struct xfs_perag *pag;
2262
xfs_agnumber_t ag, startag;
2266
if (ap->userdata && xfs_inode_is_filestream(ap->ip))
2267
args->type = XFS_ALLOCTYPE_NEAR_BNO;
2269
args->type = XFS_ALLOCTYPE_START_BNO;
2270
args->total = ap->total;
2273
* Search for an allocation group with a single extent large enough
2274
* for the request. If one isn't found, then adjust the minimum
2275
* allocation size to the largest space found.
2277
startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
2278
if (startag == NULLAGNUMBER)
2281
pag = xfs_perag_get(mp, ag);
2282
while (*blen < args->maxlen) {
2283
if (!pag->pagf_init) {
2284
error = xfs_alloc_pagf_init(mp, args->tp, ag,
2285
XFS_ALLOC_FLAG_TRYLOCK);
2293
* See xfs_alloc_fix_freelist...
2295
if (pag->pagf_init) {
2296
xfs_extlen_t longest;
2297
longest = xfs_alloc_longest_free_extent(mp, pag);
2298
if (*blen < longest)
2303
if (xfs_inode_is_filestream(ap->ip)) {
2304
if (*blen >= args->maxlen)
2309
* If startag is an invalid AG, we've
2310
* come here once before and
2311
* xfs_filestream_new_ag picked the
2312
* best currently available.
2314
* Don't continue looping, since we
2315
* could loop forever.
2317
if (startag == NULLAGNUMBER)
2320
error = xfs_filestream_new_ag(ap, &ag);
2325
/* loop again to set 'blen'*/
2326
startag = NULLAGNUMBER;
2327
pag = xfs_perag_get(mp, ag);
2331
if (++ag == mp->m_sb.sb_agcount)
2336
pag = xfs_perag_get(mp, ag);
2341
* Since the above loop did a BUF_TRYLOCK, it is
2342
* possible that there is space for this request.
2344
if (notinit || *blen < ap->minlen)
2345
args->minlen = ap->minlen;
2347
* If the best seen length is less than the request
2348
* length, use the best as the minimum.
2350
else if (*blen < args->maxlen)
2351
args->minlen = *blen;
2353
* Otherwise we've seen an extent as big as maxlen,
2354
* use that as the minimum.
2357
args->minlen = args->maxlen;
2360
* set the failure fallback case to look in the selected
2361
* AG as the stream may have moved.
2363
if (xfs_inode_is_filestream(ap->ip))
2364
ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
2371
xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2373
xfs_mount_t *mp; /* mount point structure */
2374
xfs_alloctype_t atype = 0; /* type for allocation routines */
2375
xfs_extlen_t align; /* minimum allocation alignment */
2376
xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
2378
xfs_alloc_arg_t args;
2380
xfs_extlen_t nextminlen = 0;
2381
int nullfb; /* true if ap->firstblock isn't set */
2388
mp = ap->ip->i_mount;
2389
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
2390
if (unlikely(align)) {
2391
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
2392
align, 0, ap->eof, 0, ap->conv,
2393
&ap->offset, &ap->length);
2397
nullfb = *ap->firstblock == NULLFSBLOCK;
2398
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
2400
if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
2401
ag = xfs_filestream_lookup_ag(ap->ip);
2402
ag = (ag != NULLAGNUMBER) ? ag : 0;
2403
ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
2405
ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2408
ap->blkno = *ap->firstblock;
2410
xfs_bmap_adjacent(ap);
2413
* If allowed, use ap->blkno; otherwise must use firstblock since
2414
* it's in the right allocation group.
2416
if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
2419
ap->blkno = *ap->firstblock;
2421
* Normal allocation, done through xfs_alloc_vextent.
2423
tryagain = isaligned = 0;
2426
args.fsbno = ap->blkno;
2428
/* Trim the allocation back to the maximum an AG can fit. */
2429
args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
2430
args.firstblock = *ap->firstblock;
2433
error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
2436
} else if (ap->flist->xbf_low) {
2437
if (xfs_inode_is_filestream(ap->ip))
2438
args.type = XFS_ALLOCTYPE_FIRST_AG;
2440
args.type = XFS_ALLOCTYPE_START_BNO;
2441
args.total = args.minlen = ap->minlen;
2443
args.type = XFS_ALLOCTYPE_NEAR_BNO;
2444
args.total = ap->total;
2445
args.minlen = ap->minlen;
2447
/* apply extent size hints if obtained earlier */
2448
if (unlikely(align)) {
2450
if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
2451
args.mod = (xfs_extlen_t)(args.prod - args.mod);
2452
} else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
2456
args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
2457
if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
2458
args.mod = (xfs_extlen_t)(args.prod - args.mod);
2461
* If we are not low on available data blocks, and the
2462
* underlying logical volume manager is a stripe, and
2463
* the file offset is zero then try to allocate data
2464
* blocks on stripe unit boundary.
2465
* NOTE: ap->aeof is only set if the allocation length
2466
* is >= the stripe unit and the allocation offset is
2467
* at the end of file.
2469
if (!ap->flist->xbf_low && ap->aeof) {
2471
args.alignment = mp->m_dalign;
2475
* Adjust for alignment
2477
if (blen > args.alignment && blen <= args.maxlen)
2478
args.minlen = blen - args.alignment;
2479
args.minalignslop = 0;
2482
* First try an exact bno allocation.
2483
* If it fails then do a near or start bno
2484
* allocation with alignment turned on.
2488
args.type = XFS_ALLOCTYPE_THIS_BNO;
2491
* Compute the minlen+alignment for the
2492
* next case. Set slop so that the value
2493
* of minlen+alignment+slop doesn't go up
2494
* between the calls.
2496
if (blen > mp->m_dalign && blen <= args.maxlen)
2497
nextminlen = blen - mp->m_dalign;
2499
nextminlen = args.minlen;
2500
if (nextminlen + mp->m_dalign > args.minlen + 1)
2502
nextminlen + mp->m_dalign -
2505
args.minalignslop = 0;
2509
args.minalignslop = 0;
2511
args.minleft = ap->minleft;
2512
args.wasdel = ap->wasdel;
2514
args.userdata = ap->userdata;
2515
if ((error = xfs_alloc_vextent(&args)))
2517
if (tryagain && args.fsbno == NULLFSBLOCK) {
2519
* Exact allocation failed. Now try with alignment
2523
args.fsbno = ap->blkno;
2524
args.alignment = mp->m_dalign;
2525
args.minlen = nextminlen;
2526
args.minalignslop = 0;
2528
if ((error = xfs_alloc_vextent(&args)))
2531
if (isaligned && args.fsbno == NULLFSBLOCK) {
2533
* allocation failed, so turn off alignment and
2537
args.fsbno = ap->blkno;
2539
if ((error = xfs_alloc_vextent(&args)))
2542
if (args.fsbno == NULLFSBLOCK && nullfb &&
2543
args.minlen > ap->minlen) {
2544
args.minlen = ap->minlen;
2545
args.type = XFS_ALLOCTYPE_START_BNO;
2546
args.fsbno = ap->blkno;
2547
if ((error = xfs_alloc_vextent(&args)))
2550
if (args.fsbno == NULLFSBLOCK && nullfb) {
2552
args.type = XFS_ALLOCTYPE_FIRST_AG;
2553
args.total = ap->minlen;
2555
if ((error = xfs_alloc_vextent(&args)))
2557
ap->flist->xbf_low = 1;
2559
if (args.fsbno != NULLFSBLOCK) {
2561
* check the allocation happened at the same or higher AG than
2562
* the first block that was allocated.
2564
ASSERT(*ap->firstblock == NULLFSBLOCK ||
2565
XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
2566
XFS_FSB_TO_AGNO(mp, args.fsbno) ||
2567
(ap->flist->xbf_low &&
2568
XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
2569
XFS_FSB_TO_AGNO(mp, args.fsbno)));
2571
ap->blkno = args.fsbno;
2572
if (*ap->firstblock == NULLFSBLOCK)
2573
*ap->firstblock = args.fsbno;
2574
ASSERT(nullfb || fb_agno == args.agno ||
2575
(ap->flist->xbf_low && fb_agno < args.agno));
2576
ap->length = args.len;
2577
ap->ip->i_d.di_nblocks += args.len;
2578
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2580
ap->ip->i_delayed_blks -= args.len;
2582
* Adjust the disk quota also. This was reserved
2585
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2586
ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2587
XFS_TRANS_DQ_BCOUNT,
2590
ap->blkno = NULLFSBLOCK;
2597
* xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2598
* It figures out where to ask the underlying allocator to put the new extent.
2602
xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2604
if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
2605
return xfs_bmap_rtalloc(ap);
2606
return xfs_bmap_btalloc(ap);
2610
* Transform a btree format file with only one leaf node, where the
2611
* extents list will fit in the inode, into an extents format file.
2612
* Since the file extents are already in-core, all we have to do is
2613
* give up the space for the btree root and pitch the leaf block.
2615
STATIC int /* error */
2616
xfs_bmap_btree_to_extents(
2617
xfs_trans_t *tp, /* transaction pointer */
2618
xfs_inode_t *ip, /* incore inode pointer */
2619
xfs_btree_cur_t *cur, /* btree cursor */
2620
int *logflagsp, /* inode logging flags */
2621
int whichfork) /* data or attr fork */
2624
struct xfs_btree_block *cblock;/* child btree block */
2625
xfs_fsblock_t cbno; /* child block number */
2626
xfs_buf_t *cbp; /* child block's buffer */
2627
int error; /* error return value */
2628
xfs_ifork_t *ifp; /* inode fork data */
2629
xfs_mount_t *mp; /* mount point structure */
2630
__be64 *pp; /* ptr to block address */
2631
struct xfs_btree_block *rblock;/* root btree block */
2634
ifp = XFS_IFORK_PTR(ip, whichfork);
2635
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2636
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
2637
rblock = ifp->if_broot;
2638
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
2639
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
2640
ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
2641
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
2642
cbno = be64_to_cpu(*pp);
2645
if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
2648
if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
2649
XFS_BMAP_BTREE_REF)))
2651
cblock = XFS_BUF_TO_BLOCK(cbp);
2652
if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
2654
xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
2655
ip->i_d.di_nblocks--;
2656
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
2657
xfs_trans_binval(tp, cbp);
2658
if (cur->bc_bufs[0] == cbp)
2659
cur->bc_bufs[0] = NULL;
2660
xfs_iroot_realloc(ip, -1, whichfork);
2661
ASSERT(ifp->if_broot == NULL);
2662
ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
2663
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
2664
*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2669
* Called by xfs_bmapi to update file extent records and the btree
2670
* after removing space (or undoing a delayed allocation).
2672
STATIC int /* error */
2673
xfs_bmap_del_extent(
2674
xfs_inode_t *ip, /* incore inode pointer */
2675
xfs_trans_t *tp, /* current transaction pointer */
2676
xfs_extnum_t *idx, /* extent number to update/delete */
2677
xfs_bmap_free_t *flist, /* list of extents to be freed */
2678
xfs_btree_cur_t *cur, /* if null, not a btree */
2679
xfs_bmbt_irec_t *del, /* data to remove from extents */
2680
int *logflagsp, /* inode logging flags */
2681
int whichfork) /* data or attr fork */
2683
xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
2684
xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
2685
xfs_fsblock_t del_endblock=0; /* first block past del */
2686
xfs_fileoff_t del_endoff; /* first offset past del */
2687
int delay; /* current block is delayed allocated */
2688
int do_fx; /* free extent at end of routine */
2689
xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
2690
int error; /* error return value */
2691
int flags; /* inode logging flags */
2692
xfs_bmbt_irec_t got; /* current extent entry */
2693
xfs_fileoff_t got_endoff; /* first offset past got */
2694
int i; /* temp state */
2695
xfs_ifork_t *ifp; /* inode fork pointer */
2696
xfs_mount_t *mp; /* mount structure */
2697
xfs_filblks_t nblks; /* quota/sb block count */
2698
xfs_bmbt_irec_t new; /* new record to be inserted */
2700
uint qfield; /* quota field to update */
2701
xfs_filblks_t temp; /* for indirect length calculations */
2702
xfs_filblks_t temp2; /* for indirect length calculations */
2705
XFS_STATS_INC(xs_del_exlist);
2707
if (whichfork == XFS_ATTR_FORK)
2708
state |= BMAP_ATTRFORK;
2711
ifp = XFS_IFORK_PTR(ip, whichfork);
2712
ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
2713
(uint)sizeof(xfs_bmbt_rec_t)));
2714
ASSERT(del->br_blockcount > 0);
2715
ep = xfs_iext_get_ext(ifp, *idx);
2716
xfs_bmbt_get_all(ep, &got);
2717
ASSERT(got.br_startoff <= del->br_startoff);
2718
del_endoff = del->br_startoff + del->br_blockcount;
2719
got_endoff = got.br_startoff + got.br_blockcount;
2720
ASSERT(got_endoff >= del_endoff);
2721
delay = isnullstartblock(got.br_startblock);
2722
ASSERT(isnullstartblock(del->br_startblock) == delay);
2727
* If deleting a real allocation, must free up the disk space.
2730
flags = XFS_ILOG_CORE;
2732
* Realtime allocation. Free it and record di_nblocks update.
2734
if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
2738
ASSERT(do_mod(del->br_blockcount,
2739
mp->m_sb.sb_rextsize) == 0);
2740
ASSERT(do_mod(del->br_startblock,
2741
mp->m_sb.sb_rextsize) == 0);
2742
bno = del->br_startblock;
2743
len = del->br_blockcount;
2744
do_div(bno, mp->m_sb.sb_rextsize);
2745
do_div(len, mp->m_sb.sb_rextsize);
2746
error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
2750
nblks = len * mp->m_sb.sb_rextsize;
2751
qfield = XFS_TRANS_DQ_RTBCOUNT;
2754
* Ordinary allocation.
2758
nblks = del->br_blockcount;
2759
qfield = XFS_TRANS_DQ_BCOUNT;
2762
* Set up del_endblock and cur for later.
2764
del_endblock = del->br_startblock + del->br_blockcount;
2766
if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
2767
got.br_startblock, got.br_blockcount,
2770
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2772
da_old = da_new = 0;
2774
da_old = startblockval(got.br_startblock);
2780
* Set flag value to use in switch statement.
2781
* Left-contig is 2, right-contig is 1.
2783
switch (((got.br_startoff == del->br_startoff) << 1) |
2784
(got_endoff == del_endoff)) {
2787
* Matches the whole extent. Delete the entry.
2789
xfs_iext_remove(ip, *idx, 1,
2790
whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
2795
XFS_IFORK_NEXT_SET(ip, whichfork,
2796
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2797
flags |= XFS_ILOG_CORE;
2799
flags |= xfs_ilog_fext(whichfork);
2802
if ((error = xfs_btree_delete(cur, &i)))
2804
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2809
* Deleting the first part of the extent.
2811
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2812
xfs_bmbt_set_startoff(ep, del_endoff);
2813
temp = got.br_blockcount - del->br_blockcount;
2814
xfs_bmbt_set_blockcount(ep, temp);
2816
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2818
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2819
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2823
xfs_bmbt_set_startblock(ep, del_endblock);
2824
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2826
flags |= xfs_ilog_fext(whichfork);
2829
if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
2830
got.br_blockcount - del->br_blockcount,
2837
* Deleting the last part of the extent.
2839
temp = got.br_blockcount - del->br_blockcount;
2840
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2841
xfs_bmbt_set_blockcount(ep, temp);
2843
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2845
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2846
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2850
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2852
flags |= xfs_ilog_fext(whichfork);
2855
if ((error = xfs_bmbt_update(cur, got.br_startoff,
2857
got.br_blockcount - del->br_blockcount,
2864
* Deleting the middle of the extent.
2866
temp = del->br_startoff - got.br_startoff;
2867
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2868
xfs_bmbt_set_blockcount(ep, temp);
2869
new.br_startoff = del_endoff;
2870
temp2 = got_endoff - del_endoff;
2871
new.br_blockcount = temp2;
2872
new.br_state = got.br_state;
2874
new.br_startblock = del_endblock;
2875
flags |= XFS_ILOG_CORE;
2877
if ((error = xfs_bmbt_update(cur,
2879
got.br_startblock, temp,
2882
if ((error = xfs_btree_increment(cur, 0, &i)))
2884
cur->bc_rec.b = new;
2885
error = xfs_btree_insert(cur, &i);
2886
if (error && error != ENOSPC)
2889
* If get no-space back from btree insert,
2890
* it tried a split, and we have a zero
2891
* block reservation.
2892
* Fix up our state and return the error.
2894
if (error == ENOSPC) {
2896
* Reset the cursor, don't trust
2897
* it after any insert operation.
2899
if ((error = xfs_bmbt_lookup_eq(cur,
2904
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2906
* Update the btree record back
2907
* to the original value.
2909
if ((error = xfs_bmbt_update(cur,
2916
* Reset the extent record back
2917
* to the original value.
2919
xfs_bmbt_set_blockcount(ep,
2922
error = XFS_ERROR(ENOSPC);
2925
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2927
flags |= xfs_ilog_fext(whichfork);
2928
XFS_IFORK_NEXT_SET(ip, whichfork,
2929
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2931
ASSERT(whichfork == XFS_DATA_FORK);
2932
temp = xfs_bmap_worst_indlen(ip, temp);
2933
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2934
temp2 = xfs_bmap_worst_indlen(ip, temp2);
2935
new.br_startblock = nullstartblock((int)temp2);
2936
da_new = temp + temp2;
2937
while (da_new > da_old) {
2941
xfs_bmbt_set_startblock(ep,
2942
nullstartblock((int)temp));
2944
if (da_new == da_old)
2950
nullstartblock((int)temp2);
2954
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2955
xfs_iext_insert(ip, *idx + 1, 1, &new, state);
2960
* If we need to, add to list of extents to delete.
2963
xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
2966
* Adjust inode # blocks in the file.
2969
ip->i_d.di_nblocks -= nblks;
2971
* Adjust quota data.
2974
xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
2977
* Account for change in delayed indirect blocks.
2978
* Nothing to do for disk quota accounting here.
2980
ASSERT(da_old >= da_new);
2981
if (da_old > da_new) {
2982
xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
2983
(int64_t)(da_old - da_new), 0);
2991
* Remove the entry "free" from the free item list. Prev points to the
2992
* previous entry, unless "free" is the head of the list.
2996
xfs_bmap_free_t *flist, /* free item list header */
2997
xfs_bmap_free_item_t *prev, /* previous item on list, if any */
2998
xfs_bmap_free_item_t *free) /* list item to be freed */
3001
prev->xbfi_next = free->xbfi_next;
3003
flist->xbf_first = free->xbfi_next;
3005
kmem_zone_free(xfs_bmap_free_item_zone, free);
3009
* Convert an extents-format file into a btree-format file.
3010
* The new file will have a root block (in the inode) and a single child block.
3012
STATIC int /* error */
3013
xfs_bmap_extents_to_btree(
3014
xfs_trans_t *tp, /* transaction pointer */
3015
xfs_inode_t *ip, /* incore inode pointer */
3016
xfs_fsblock_t *firstblock, /* first-block-allocated */
3017
xfs_bmap_free_t *flist, /* blocks freed in xaction */
3018
xfs_btree_cur_t **curp, /* cursor returned to caller */
3019
int wasdel, /* converting a delayed alloc */
3020
int *logflagsp, /* inode logging flags */
3021
int whichfork) /* data or attr fork */
3023
struct xfs_btree_block *ablock; /* allocated (child) bt block */
3024
xfs_buf_t *abp; /* buffer for ablock */
3025
xfs_alloc_arg_t args; /* allocation arguments */
3026
xfs_bmbt_rec_t *arp; /* child record pointer */
3027
struct xfs_btree_block *block; /* btree root block */
3028
xfs_btree_cur_t *cur; /* bmap btree cursor */
3029
xfs_bmbt_rec_host_t *ep; /* extent record pointer */
3030
int error; /* error return value */
3031
xfs_extnum_t i, cnt; /* extent record index */
3032
xfs_ifork_t *ifp; /* inode fork pointer */
3033
xfs_bmbt_key_t *kp; /* root block key pointer */
3034
xfs_mount_t *mp; /* mount structure */
3035
xfs_extnum_t nextents; /* number of file extents */
3036
xfs_bmbt_ptr_t *pp; /* root block address pointer */
3038
ifp = XFS_IFORK_PTR(ip, whichfork);
3039
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
3040
ASSERT(ifp->if_ext_max ==
3041
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
3043
* Make space in the inode incore.
3045
xfs_iroot_realloc(ip, 1, whichfork);
3046
ifp->if_flags |= XFS_IFBROOT;
3051
block = ifp->if_broot;
3052
block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3053
block->bb_level = cpu_to_be16(1);
3054
block->bb_numrecs = cpu_to_be16(1);
3055
block->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3056
block->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3059
* Need a cursor. Can't allocate until bb_level is filled in.
3062
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
3063
cur->bc_private.b.firstblock = *firstblock;
3064
cur->bc_private.b.flist = flist;
3065
cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
3067
* Convert to a btree with two levels, one record in root.
3069
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
3072
args.firstblock = *firstblock;
3073
if (*firstblock == NULLFSBLOCK) {
3074
args.type = XFS_ALLOCTYPE_START_BNO;
3075
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
3076
} else if (flist->xbf_low) {
3077
args.type = XFS_ALLOCTYPE_START_BNO;
3078
args.fsbno = *firstblock;
3080
args.type = XFS_ALLOCTYPE_NEAR_BNO;
3081
args.fsbno = *firstblock;
3083
args.minlen = args.maxlen = args.prod = 1;
3084
args.total = args.minleft = args.alignment = args.mod = args.isfl =
3085
args.minalignslop = 0;
3086
args.wasdel = wasdel;
3088
if ((error = xfs_alloc_vextent(&args))) {
3089
xfs_iroot_realloc(ip, -1, whichfork);
3090
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
3094
* Allocation can't fail, the space was reserved.
3096
ASSERT(args.fsbno != NULLFSBLOCK);
3097
ASSERT(*firstblock == NULLFSBLOCK ||
3098
args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
3100
args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
3101
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
3102
cur->bc_private.b.allocated++;
3103
ip->i_d.di_nblocks++;
3104
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3105
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3107
* Fill in the child block.
3109
ablock = XFS_BUF_TO_BLOCK(abp);
3110
ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3111
ablock->bb_level = 0;
3112
ablock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3113
ablock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3114
arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3115
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3116
for (cnt = i = 0; i < nextents; i++) {
3117
ep = xfs_iext_get_ext(ifp, i);
3118
if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
3119
arp->l0 = cpu_to_be64(ep->l0);
3120
arp->l1 = cpu_to_be64(ep->l1);
3124
ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
3125
xfs_btree_set_numrecs(ablock, cnt);
3128
* Fill in the root key and pointer.
3130
kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
3131
arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3132
kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
3133
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
3134
be16_to_cpu(block->bb_level)));
3135
*pp = cpu_to_be64(args.fsbno);
3138
* Do all this logging at the end so that
3139
* the root is at the right level.
3141
xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
3142
xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
3143
ASSERT(*curp == NULL);
3145
*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
3150
* Calculate the default attribute fork offset for newly created inodes.
3153
xfs_default_attroffset(
3154
struct xfs_inode *ip)
3156
struct xfs_mount *mp = ip->i_mount;
3159
if (mp->m_sb.sb_inodesize == 256) {
3160
offset = XFS_LITINO(mp) -
3161
XFS_BMDR_SPACE_CALC(MINABTPTRS);
3163
offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
3166
ASSERT(offset < XFS_LITINO(mp));
3171
* Helper routine to reset inode di_forkoff field when switching
3172
* attribute fork from local to extent format - we reset it where
3173
* possible to make space available for inline data fork extents.
3176
xfs_bmap_forkoff_reset(
3181
if (whichfork == XFS_ATTR_FORK &&
3182
ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
3183
ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
3184
ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
3185
uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
3187
if (dfl_forkoff > ip->i_d.di_forkoff) {
3188
ip->i_d.di_forkoff = dfl_forkoff;
3189
ip->i_df.if_ext_max =
3190
XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
3191
ip->i_afp->if_ext_max =
3192
XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
3198
* Convert a local file to an extents file.
3199
* This code is out of bounds for data forks of regular files,
3200
* since the file data needs to get logged so things will stay consistent.
3201
* (The bmap-level manipulations are ok, though).
3203
STATIC int /* error */
3204
xfs_bmap_local_to_extents(
3205
xfs_trans_t *tp, /* transaction pointer */
3206
xfs_inode_t *ip, /* incore inode pointer */
3207
xfs_fsblock_t *firstblock, /* first block allocated in xaction */
3208
xfs_extlen_t total, /* total blocks needed by transaction */
3209
int *logflagsp, /* inode logging flags */
3210
int whichfork) /* data or attr fork */
3212
int error; /* error return value */
3213
int flags; /* logging flags returned */
3214
xfs_ifork_t *ifp; /* inode fork pointer */
3217
* We don't want to deal with the case of keeping inode data inline yet.
3218
* So sending the data fork of a regular inode is invalid.
3220
ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
3221
ifp = XFS_IFORK_PTR(ip, whichfork);
3222
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3225
if (ifp->if_bytes) {
3226
xfs_alloc_arg_t args; /* allocation arguments */
3227
xfs_buf_t *bp; /* buffer for extent block */
3228
xfs_bmbt_rec_host_t *ep;/* extent record pointer */
3231
args.mp = ip->i_mount;
3232
args.firstblock = *firstblock;
3233
ASSERT((ifp->if_flags &
3234
(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
3236
* Allocate a block. We know we need only one, since the
3237
* file currently fits in an inode.
3239
if (*firstblock == NULLFSBLOCK) {
3240
args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3241
args.type = XFS_ALLOCTYPE_START_BNO;
3243
args.fsbno = *firstblock;
3244
args.type = XFS_ALLOCTYPE_NEAR_BNO;
3247
args.mod = args.minleft = args.alignment = args.wasdel =
3248
args.isfl = args.minalignslop = 0;
3249
args.minlen = args.maxlen = args.prod = 1;
3250
if ((error = xfs_alloc_vextent(&args)))
3253
* Can't fail, the space was reserved.
3255
ASSERT(args.fsbno != NULLFSBLOCK);
3256
ASSERT(args.len == 1);
3257
*firstblock = args.fsbno;
3258
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
3259
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
3260
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3261
xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3262
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3263
xfs_iext_add(ifp, 0, 1);
3264
ep = xfs_iext_get_ext(ifp, 0);
3265
xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3266
trace_xfs_bmap_post_update(ip, 0,
3267
whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
3269
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3270
ip->i_d.di_nblocks = 1;
3271
xfs_trans_mod_dquot_byino(tp, ip,
3272
XFS_TRANS_DQ_BCOUNT, 1L);
3273
flags |= xfs_ilog_fext(whichfork);
3275
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
3276
xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3278
ifp->if_flags &= ~XFS_IFINLINE;
3279
ifp->if_flags |= XFS_IFEXTENTS;
3280
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3281
flags |= XFS_ILOG_CORE;
3288
* Search the extent records for the entry containing block bno.
3289
* If bno lies in a hole, point to the next entry. If bno lies
3290
* past eof, *eofp will be set, and *prevp will contain the last
3291
* entry (null if none). Else, *lastxp will be set to the index
3292
* of the found entry; *gotp will contain the entry.
3294
STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
3295
xfs_bmap_search_multi_extents(
3296
xfs_ifork_t *ifp, /* inode fork pointer */
3297
xfs_fileoff_t bno, /* block number searched for */
3298
int *eofp, /* out: end of file found */
3299
xfs_extnum_t *lastxp, /* out: last extent index */
3300
xfs_bmbt_irec_t *gotp, /* out: extent entry found */
3301
xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
3303
xfs_bmbt_rec_host_t *ep; /* extent record pointer */
3304
xfs_extnum_t lastx; /* last extent index */
3307
* Initialize the extent entry structure to catch access to
3308
* uninitialized br_startblock field.
3310
gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
3311
gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3312
gotp->br_state = XFS_EXT_INVALID;
3314
gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
3316
gotp->br_startblock = 0xffffa5a5;
3318
prevp->br_startoff = NULLFILEOFF;
3320
ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
3322
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
3324
if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
3325
xfs_bmbt_get_all(ep, gotp);
3339
* Search the extents list for the inode, for the extent containing bno.
3340
* If bno lies in a hole, point to the next entry. If bno lies past eof,
3341
* *eofp will be set, and *prevp will contain the last entry (null if none).
3342
* Else, *lastxp will be set to the index of the found
3343
* entry; *gotp will contain the entry.
3345
STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
3346
xfs_bmap_search_extents(
3347
xfs_inode_t *ip, /* incore inode pointer */
3348
xfs_fileoff_t bno, /* block number searched for */
3349
int fork, /* data or attr fork */
3350
int *eofp, /* out: end of file found */
3351
xfs_extnum_t *lastxp, /* out: last extent index */
3352
xfs_bmbt_irec_t *gotp, /* out: extent entry found */
3353
xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
3355
xfs_ifork_t *ifp; /* inode fork pointer */
3356
xfs_bmbt_rec_host_t *ep; /* extent record pointer */
3358
XFS_STATS_INC(xs_look_exlist);
3359
ifp = XFS_IFORK_PTR(ip, fork);
3361
ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
3363
if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
3364
!(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
3365
xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
3366
"Access to block zero in inode %llu "
3367
"start_block: %llx start_off: %llx "
3368
"blkcnt: %llx extent-state: %x lastx: %x\n",
3369
(unsigned long long)ip->i_ino,
3370
(unsigned long long)gotp->br_startblock,
3371
(unsigned long long)gotp->br_startoff,
3372
(unsigned long long)gotp->br_blockcount,
3373
gotp->br_state, *lastxp);
3374
*lastxp = NULLEXTNUM;
3382
* Compute the worst-case number of indirect blocks that will be used
3383
* for ip's delayed extent of length "len".
3385
STATIC xfs_filblks_t
3386
xfs_bmap_worst_indlen(
3387
xfs_inode_t *ip, /* incore inode pointer */
3388
xfs_filblks_t len) /* delayed extent length */
3390
int level; /* btree level number */
3391
int maxrecs; /* maximum record count at this level */
3392
xfs_mount_t *mp; /* mount structure */
3393
xfs_filblks_t rval; /* return value */
3396
maxrecs = mp->m_bmap_dmxr[0];
3397
for (level = 0, rval = 0;
3398
level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3401
do_div(len, maxrecs);
3404
return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3407
maxrecs = mp->m_bmap_dmxr[1];
3413
* Convert inode from non-attributed to attributed.
3414
* Must not be in a transaction, ip must not be locked.
3416
int /* error code */
3417
xfs_bmap_add_attrfork(
3418
xfs_inode_t *ip, /* incore inode pointer */
3419
int size, /* space new attribute needs */
3420
int rsvd) /* xact may use reserved blks */
3422
xfs_fsblock_t firstblock; /* 1st block/ag allocated */
3423
xfs_bmap_free_t flist; /* freed extent records */
3424
xfs_mount_t *mp; /* mount structure */
3425
xfs_trans_t *tp; /* transaction pointer */
3426
int blks; /* space reservation */
3427
int version = 1; /* superblock attr version */
3428
int committed; /* xaction was committed */
3429
int logflags; /* logging flags */
3430
int error; /* error return value */
3432
ASSERT(XFS_IFORK_Q(ip) == 0);
3433
ASSERT(ip->i_df.if_ext_max ==
3434
XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3437
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
3438
tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
3439
blks = XFS_ADDAFORK_SPACE_RES(mp);
3441
tp->t_flags |= XFS_TRANS_RESERVE;
3442
if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
3443
XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
3445
xfs_ilock(ip, XFS_ILOCK_EXCL);
3446
error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
3447
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
3448
XFS_QMOPT_RES_REGBLKS);
3450
xfs_iunlock(ip, XFS_ILOCK_EXCL);
3451
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
3454
if (XFS_IFORK_Q(ip))
3456
if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
3458
* For inodes coming from pre-6.2 filesystems.
3460
ASSERT(ip->i_d.di_aformat == 0);
3461
ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
3463
ASSERT(ip->i_d.di_anextents == 0);
3465
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
3466
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
3468
switch (ip->i_d.di_format) {
3469
case XFS_DINODE_FMT_DEV:
3470
ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3472
case XFS_DINODE_FMT_UUID:
3473
ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
3475
case XFS_DINODE_FMT_LOCAL:
3476
case XFS_DINODE_FMT_EXTENTS:
3477
case XFS_DINODE_FMT_BTREE:
3478
ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3479
if (!ip->i_d.di_forkoff)
3480
ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
3481
else if (mp->m_flags & XFS_MOUNT_ATTR2)
3486
error = XFS_ERROR(EINVAL);
3489
ip->i_df.if_ext_max =
3490
XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3491
ASSERT(ip->i_afp == NULL);
3492
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
3493
ip->i_afp->if_ext_max =
3494
XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3495
ip->i_afp->if_flags = XFS_IFEXTENTS;
3497
xfs_bmap_init(&flist, &firstblock);
3498
switch (ip->i_d.di_format) {
3499
case XFS_DINODE_FMT_LOCAL:
3500
error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
3503
case XFS_DINODE_FMT_EXTENTS:
3504
error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
3507
case XFS_DINODE_FMT_BTREE:
3508
error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
3516
xfs_trans_log_inode(tp, ip, logflags);
3519
if (!xfs_sb_version_hasattr(&mp->m_sb) ||
3520
(!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
3521
__int64_t sbfields = 0;
3523
spin_lock(&mp->m_sb_lock);
3524
if (!xfs_sb_version_hasattr(&mp->m_sb)) {
3525
xfs_sb_version_addattr(&mp->m_sb);
3526
sbfields |= XFS_SB_VERSIONNUM;
3528
if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
3529
xfs_sb_version_addattr2(&mp->m_sb);
3530
sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
3533
spin_unlock(&mp->m_sb_lock);
3534
xfs_mod_sb(tp, sbfields);
3536
spin_unlock(&mp->m_sb_lock);
3538
if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
3540
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
3541
ASSERT(ip->i_df.if_ext_max ==
3542
XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3545
xfs_bmap_cancel(&flist);
3547
xfs_iunlock(ip, XFS_ILOCK_EXCL);
3549
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
3550
ASSERT(ip->i_df.if_ext_max ==
3551
XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3556
* Add the extent to the list of extents to be free at transaction end.
3557
* The list is maintained sorted (by block number).
3562
xfs_fsblock_t bno, /* fs block number of extent */
3563
xfs_filblks_t len, /* length of extent */
3564
xfs_bmap_free_t *flist, /* list of extents */
3565
xfs_mount_t *mp) /* mount point structure */
3567
xfs_bmap_free_item_t *cur; /* current (next) element */
3568
xfs_bmap_free_item_t *new; /* new element */
3569
xfs_bmap_free_item_t *prev; /* previous element */
3571
xfs_agnumber_t agno;
3572
xfs_agblock_t agbno;
3574
ASSERT(bno != NULLFSBLOCK);
3576
ASSERT(len <= MAXEXTLEN);
3577
ASSERT(!isnullstartblock(bno));
3578
agno = XFS_FSB_TO_AGNO(mp, bno);
3579
agbno = XFS_FSB_TO_AGBNO(mp, bno);
3580
ASSERT(agno < mp->m_sb.sb_agcount);
3581
ASSERT(agbno < mp->m_sb.sb_agblocks);
3582
ASSERT(len < mp->m_sb.sb_agblocks);
3583
ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
3585
ASSERT(xfs_bmap_free_item_zone != NULL);
3586
new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
3587
new->xbfi_startblock = bno;
3588
new->xbfi_blockcount = (xfs_extlen_t)len;
3589
for (prev = NULL, cur = flist->xbf_first;
3591
prev = cur, cur = cur->xbfi_next) {
3592
if (cur->xbfi_startblock >= bno)
3596
prev->xbfi_next = new;
3598
flist->xbf_first = new;
3599
new->xbfi_next = cur;
3604
* Compute and fill in the value of the maximum depth of a bmap btree
3605
* in this filesystem. Done once, during mount.
3608
xfs_bmap_compute_maxlevels(
3609
xfs_mount_t *mp, /* file system mount structure */
3610
int whichfork) /* data or attr fork */
3612
int level; /* btree level */
3613
uint maxblocks; /* max blocks at this level */
3614
uint maxleafents; /* max leaf entries possible */
3615
int maxrootrecs; /* max records in root block */
3616
int minleafrecs; /* min records in leaf block */
3617
int minnoderecs; /* min records in node block */
3618
int sz; /* root block size */
3621
* The maximum number of extents in a file, hence the maximum
3622
* number of leaf entries, is controlled by the type of di_nextents
3623
* (a signed 32-bit number, xfs_extnum_t), or by di_anextents
3624
* (a signed 16-bit number, xfs_aextnum_t).
3626
* Note that we can no longer assume that if we are in ATTR1 that
3627
* the fork offset of all the inodes will be
3628
* (xfs_default_attroffset(ip) >> 3) because we could have mounted
3629
* with ATTR2 and then mounted back with ATTR1, keeping the
3630
* di_forkoff's fixed but probably at various positions. Therefore,
3631
* for both ATTR1 and ATTR2 we have to assume the worst case scenario
3632
* of a minimum size available.
3634
if (whichfork == XFS_DATA_FORK) {
3635
maxleafents = MAXEXTNUM;
3636
sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
3638
maxleafents = MAXAEXTNUM;
3639
sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
3641
maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
3642
minleafrecs = mp->m_bmap_dmnr[0];
3643
minnoderecs = mp->m_bmap_dmnr[1];
3644
maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
3645
for (level = 1; maxblocks > 1; level++) {
3646
if (maxblocks <= maxrootrecs)
3649
maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
3651
mp->m_bm_maxlevels[whichfork] = level;
3655
* Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
3656
* caller. Frees all the extents that need freeing, which must be done
3657
* last due to locking considerations. We never free any extents in
3658
* the first transaction.
3660
* Return 1 if the given transaction was committed and a new one
3661
* started, and 0 otherwise in the committed parameter.
3665
xfs_trans_t **tp, /* transaction pointer addr */
3666
xfs_bmap_free_t *flist, /* i/o: list extents to free */
3667
int *committed) /* xact committed or not */
3669
xfs_efd_log_item_t *efd; /* extent free data */
3670
xfs_efi_log_item_t *efi; /* extent free intention */
3671
int error; /* error return value */
3672
xfs_bmap_free_item_t *free; /* free extent item */
3673
unsigned int logres; /* new log reservation */
3674
unsigned int logcount; /* new log count */
3675
xfs_mount_t *mp; /* filesystem mount structure */
3676
xfs_bmap_free_item_t *next; /* next item on free list */
3677
xfs_trans_t *ntp; /* new transaction pointer */
3679
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
3680
if (flist->xbf_count == 0) {
3685
efi = xfs_trans_get_efi(ntp, flist->xbf_count);
3686
for (free = flist->xbf_first; free; free = free->xbfi_next)
3687
xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
3688
free->xbfi_blockcount);
3689
logres = ntp->t_log_res;
3690
logcount = ntp->t_log_count;
3691
ntp = xfs_trans_dup(*tp);
3692
error = xfs_trans_commit(*tp, 0);
3696
* We have a new transaction, so we should return committed=1,
3697
* even though we're returning an error.
3703
* transaction commit worked ok so we can drop the extra ticket
3704
* reference that we gained in xfs_trans_dup()
3706
xfs_log_ticket_put(ntp->t_ticket);
3708
if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
3711
efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
3712
for (free = flist->xbf_first; free != NULL; free = next) {
3713
next = free->xbfi_next;
3714
if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
3715
free->xbfi_blockcount))) {
3717
* The bmap free list will be cleaned up at a
3718
* higher level. The EFI will be canceled when
3719
* this transaction is aborted.
3720
* Need to force shutdown here to make sure it
3721
* happens, since this transaction may not be
3725
if (!XFS_FORCED_SHUTDOWN(mp))
3726
xfs_force_shutdown(mp,
3727
(error == EFSCORRUPTED) ?
3728
SHUTDOWN_CORRUPT_INCORE :
3729
SHUTDOWN_META_IO_ERROR);
3732
xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
3733
free->xbfi_blockcount);
3734
xfs_bmap_del_free(flist, NULL, free);
3740
* Free up any items left in the list.
3744
xfs_bmap_free_t *flist) /* list of bmap_free_items */
3746
xfs_bmap_free_item_t *free; /* free list item */
3747
xfs_bmap_free_item_t *next;
3749
if (flist->xbf_count == 0)
3751
ASSERT(flist->xbf_first != NULL);
3752
for (free = flist->xbf_first; free; free = next) {
3753
next = free->xbfi_next;
3754
xfs_bmap_del_free(flist, NULL, free);
3756
ASSERT(flist->xbf_count == 0);
3760
* Returns the file-relative block number of the first unused block(s)
3761
* in the file with at least "len" logically contiguous blocks free.
3762
* This is the lowest-address hole if the file has holes, else the first block
3763
* past the end of file.
3764
* Return 0 if the file is currently local (in-inode).
3767
xfs_bmap_first_unused(
3768
xfs_trans_t *tp, /* transaction pointer */
3769
xfs_inode_t *ip, /* incore inode */
3770
xfs_extlen_t len, /* size of hole to find */
3771
xfs_fileoff_t *first_unused, /* unused block */
3772
int whichfork) /* data or attr fork */
3774
int error; /* error return value */
3775
int idx; /* extent record index */
3776
xfs_ifork_t *ifp; /* inode fork pointer */
3777
xfs_fileoff_t lastaddr; /* last block number seen */
3778
xfs_fileoff_t lowest; /* lowest useful block */
3779
xfs_fileoff_t max; /* starting useful block */
3780
xfs_fileoff_t off; /* offset for this block */
3781
xfs_extnum_t nextents; /* number of extent entries */
3783
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
3784
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
3785
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3786
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
3790
ifp = XFS_IFORK_PTR(ip, whichfork);
3791
if (!(ifp->if_flags & XFS_IFEXTENTS) &&
3792
(error = xfs_iread_extents(tp, ip, whichfork)))
3794
lowest = *first_unused;
3795
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3796
for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
3797
xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
3798
off = xfs_bmbt_get_startoff(ep);
3800
* See if the hole before this extent will work.
3802
if (off >= lowest + len && off - max >= len) {
3803
*first_unused = max;
3806
lastaddr = off + xfs_bmbt_get_blockcount(ep);
3807
max = XFS_FILEOFF_MAX(lastaddr, lowest);
3809
*first_unused = max;
3814
* Returns the file-relative block number of the last block + 1 before
3815
* last_block (input value) in the file.
3816
* This is not based on i_size, it is based on the extent records.
3817
* Returns 0 for local files, as they do not have extent records.
3820
xfs_bmap_last_before(
3821
xfs_trans_t *tp, /* transaction pointer */
3822
xfs_inode_t *ip, /* incore inode */
3823
xfs_fileoff_t *last_block, /* last block */
3824
int whichfork) /* data or attr fork */
3826
xfs_fileoff_t bno; /* input file offset */
3827
int eof; /* hit end of file */
3828
xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
3829
int error; /* error return value */
3830
xfs_bmbt_irec_t got; /* current extent value */
3831
xfs_ifork_t *ifp; /* inode fork pointer */
3832
xfs_extnum_t lastx; /* last extent used */
3833
xfs_bmbt_irec_t prev; /* previous extent value */
3835
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
3836
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3837
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
3838
return XFS_ERROR(EIO);
3839
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
3843
ifp = XFS_IFORK_PTR(ip, whichfork);
3844
if (!(ifp->if_flags & XFS_IFEXTENTS) &&
3845
(error = xfs_iread_extents(tp, ip, whichfork)))
3847
bno = *last_block - 1;
3848
ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
3850
if (eof || xfs_bmbt_get_startoff(ep) > bno) {
3851
if (prev.br_startoff == NULLFILEOFF)
3854
*last_block = prev.br_startoff + prev.br_blockcount;
3857
* Otherwise *last_block is already the right answer.
3863
xfs_bmap_last_extent(
3864
struct xfs_trans *tp,
3865
struct xfs_inode *ip,
3867
struct xfs_bmbt_irec *rec,
3870
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3874
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3875
error = xfs_iread_extents(tp, ip, whichfork);
3880
nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
3881
if (nextents == 0) {
3886
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
3892
* Check the last inode extent to determine whether this allocation will result
3893
* in blocks being allocated at the end of the file. When we allocate new data
3894
* blocks at the end of the file which do not start at the previous data block,
3895
* we will try to align the new blocks at stripe unit boundaries.
3897
* Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
3898
* at, or past the EOF.
3902
struct xfs_bmalloca *bma,
3905
struct xfs_bmbt_irec rec;
3910
error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
3912
if (error || is_empty)
3916
* Check if we are allocation or past the last extent, or at least into
3917
* the last delayed allocated extent.
3919
bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
3920
(bma->offset >= rec.br_startoff &&
3921
isnullstartblock(rec.br_startblock));
3926
* Check if the endoff is outside the last extent. If so the caller will grow
3927
* the allocation to a stripe unit boundary. All offsets are considered outside
3928
* the end of file for an empty fork, so 1 is returned in *eof in that case.
3932
struct xfs_inode *ip,
3933
xfs_fileoff_t endoff,
3937
struct xfs_bmbt_irec rec;
3940
error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
3944
*eof = endoff >= rec.br_startoff + rec.br_blockcount;
3949
* Returns the file-relative block number of the first block past eof in
3950
* the file. This is not based on i_size, it is based on the extent records.
3951
* Returns 0 for local files, as they do not have extent records.
3954
xfs_bmap_last_offset(
3955
struct xfs_trans *tp,
3956
struct xfs_inode *ip,
3957
xfs_fileoff_t *last_block,
3960
struct xfs_bmbt_irec rec;
3966
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
3969
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
3970
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
3971
return XFS_ERROR(EIO);
3973
error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
3974
if (error || is_empty)
3977
*last_block = rec.br_startoff + rec.br_blockcount;
3982
* Returns whether the selected fork of the inode has exactly one
3983
* block or not. For the data fork we check this matches di_size,
3984
* implying the file's range is 0..bsize-1.
3986
int /* 1=>1 block, 0=>otherwise */
3988
xfs_inode_t *ip, /* incore inode */
3989
int whichfork) /* data or attr fork */
3991
xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
3992
xfs_ifork_t *ifp; /* inode fork pointer */
3993
int rval; /* return value */
3994
xfs_bmbt_irec_t s; /* internal version of extent */
3997
if (whichfork == XFS_DATA_FORK) {
3998
return S_ISREG(ip->i_d.di_mode) ?
3999
(ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
4000
(ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4003
if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
4005
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4007
ifp = XFS_IFORK_PTR(ip, whichfork);
4008
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4009
ep = xfs_iext_get_ext(ifp, 0);
4010
xfs_bmbt_get_all(ep, &s);
4011
rval = s.br_startoff == 0 && s.br_blockcount == 1;
4012
if (rval && whichfork == XFS_DATA_FORK)
4013
ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
4018
xfs_bmap_sanity_check(
4019
struct xfs_mount *mp,
4023
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4025
if (block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC) ||
4026
be16_to_cpu(block->bb_level) != level ||
4027
be16_to_cpu(block->bb_numrecs) == 0 ||
4028
be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
4034
* Read in the extents to if_extents.
4035
* All inode fields are set up by caller, we just traverse the btree
4036
* and copy the records in. If the file system cannot contain unwritten
4037
* extents, the records are checked for no "state" flags.
4040
xfs_bmap_read_extents(
4041
xfs_trans_t *tp, /* transaction pointer */
4042
xfs_inode_t *ip, /* incore inode */
4043
int whichfork) /* data or attr fork */
4045
struct xfs_btree_block *block; /* current btree block */
4046
xfs_fsblock_t bno; /* block # of "block" */
4047
xfs_buf_t *bp; /* buffer for "block" */
4048
int error; /* error return value */
4049
xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
4050
xfs_extnum_t i, j; /* index into the extents list */
4051
xfs_ifork_t *ifp; /* fork structure */
4052
int level; /* btree level, for checking */
4053
xfs_mount_t *mp; /* file system mount structure */
4054
__be64 *pp; /* pointer to block address */
4056
xfs_extnum_t room; /* number of entries there's room for */
4060
ifp = XFS_IFORK_PTR(ip, whichfork);
4061
exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
4062
XFS_EXTFMT_INODE(ip);
4063
block = ifp->if_broot;
4065
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
4067
level = be16_to_cpu(block->bb_level);
4069
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
4070
bno = be64_to_cpu(*pp);
4071
ASSERT(bno != NULLDFSBNO);
4072
ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
4073
ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
4075
* Go down the tree until leaf level is reached, following the first
4076
* pointer (leftmost) at each level.
4078
while (level-- > 0) {
4079
if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4080
XFS_BMAP_BTREE_REF)))
4082
block = XFS_BUF_TO_BLOCK(bp);
4083
XFS_WANT_CORRUPTED_GOTO(
4084
xfs_bmap_sanity_check(mp, bp, level),
4088
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
4089
bno = be64_to_cpu(*pp);
4090
XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
4091
xfs_trans_brelse(tp, bp);
4094
* Here with bp and block set to the leftmost leaf node in the tree.
4096
room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4099
* Loop over all leaf nodes. Copy information to the extent records.
4102
xfs_bmbt_rec_t *frp;
4103
xfs_fsblock_t nextbno;
4104
xfs_extnum_t num_recs;
4107
num_recs = xfs_btree_get_numrecs(block);
4108
if (unlikely(i + num_recs > room)) {
4109
ASSERT(i + num_recs <= room);
4110
xfs_warn(ip->i_mount,
4111
"corrupt dinode %Lu, (btree extents).",
4112
(unsigned long long) ip->i_ino);
4113
XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
4114
XFS_ERRLEVEL_LOW, ip->i_mount, block);
4117
XFS_WANT_CORRUPTED_GOTO(
4118
xfs_bmap_sanity_check(mp, bp, 0),
4121
* Read-ahead the next leaf block, if any.
4123
nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
4124
if (nextbno != NULLFSBLOCK)
4125
xfs_btree_reada_bufl(mp, nextbno, 1);
4127
* Copy records into the extent records.
4129
frp = XFS_BMBT_REC_ADDR(mp, block, 1);
4131
for (j = 0; j < num_recs; j++, i++, frp++) {
4132
xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
4133
trp->l0 = be64_to_cpu(frp->l0);
4134
trp->l1 = be64_to_cpu(frp->l1);
4136
if (exntf == XFS_EXTFMT_NOSTATE) {
4138
* Check all attribute bmap btree records and
4139
* any "older" data bmap btree records for a
4140
* set bit in the "extent flag" position.
4142
if (unlikely(xfs_check_nostate_extents(ifp,
4143
start, num_recs))) {
4144
XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
4150
xfs_trans_brelse(tp, bp);
4153
* If we've reached the end, stop.
4155
if (bno == NULLFSBLOCK)
4157
if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4158
XFS_BMAP_BTREE_REF)))
4160
block = XFS_BUF_TO_BLOCK(bp);
4162
ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4163
ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4164
XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
4167
xfs_trans_brelse(tp, bp);
4168
return XFS_ERROR(EFSCORRUPTED);
4173
* Add bmap trace insert entries for all the contents of the extent records.
4176
xfs_bmap_trace_exlist(
4177
xfs_inode_t *ip, /* incore inode pointer */
4178
xfs_extnum_t cnt, /* count of entries in the list */
4179
int whichfork, /* data or attr fork */
4180
unsigned long caller_ip)
4182
xfs_extnum_t idx; /* extent record index */
4183
xfs_ifork_t *ifp; /* inode fork pointer */
4186
if (whichfork == XFS_ATTR_FORK)
4187
state |= BMAP_ATTRFORK;
4189
ifp = XFS_IFORK_PTR(ip, whichfork);
4190
ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4191
for (idx = 0; idx < cnt; idx++)
4192
trace_xfs_extlist(ip, idx, whichfork, caller_ip);
4196
* Validate that the bmbt_irecs being returned from bmapi are valid
4197
* given the callers original parameters. Specifically check the
4198
* ranges of the returned irecs to ensure that they only extent beyond
4199
* the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4202
xfs_bmap_validate_ret(
4206
xfs_bmbt_irec_t *mval,
4210
int i; /* index to map values */
4212
ASSERT(ret_nmap <= nmap);
4214
for (i = 0; i < ret_nmap; i++) {
4215
ASSERT(mval[i].br_blockcount > 0);
4216
if (!(flags & XFS_BMAPI_ENTIRE)) {
4217
ASSERT(mval[i].br_startoff >= bno);
4218
ASSERT(mval[i].br_blockcount <= len);
4219
ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4222
ASSERT(mval[i].br_startoff < bno + len);
4223
ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4227
mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4228
mval[i].br_startoff);
4229
ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4230
mval[i].br_startblock != HOLESTARTBLOCK);
4231
ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4232
mval[i].br_state == XFS_EXT_UNWRITTEN);
4239
* Trim the returned map to the required bounds
4243
struct xfs_bmbt_irec *mval,
4244
struct xfs_bmbt_irec *got,
4252
if ((flags & XFS_BMAPI_ENTIRE) ||
4253
got->br_startoff + got->br_blockcount <= obno) {
4255
if (isnullstartblock(got->br_startblock))
4256
mval->br_startblock = DELAYSTARTBLOCK;
4262
ASSERT((*bno >= obno) || (n == 0));
4264
mval->br_startoff = *bno;
4265
if (isnullstartblock(got->br_startblock))
4266
mval->br_startblock = DELAYSTARTBLOCK;
4268
mval->br_startblock = got->br_startblock +
4269
(*bno - got->br_startoff);
4271
* Return the minimum of what we got and what we asked for for
4272
* the length. We can use the len variable here because it is
4273
* modified below and we could have been there before coming
4274
* here if the first part of the allocation didn't overlap what
4277
mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
4278
got->br_blockcount - (*bno - got->br_startoff));
4279
mval->br_state = got->br_state;
4280
ASSERT(mval->br_blockcount <= len);
4285
* Update and validate the extent map to return
4288
xfs_bmapi_update_map(
4289
struct xfs_bmbt_irec **map,
4297
xfs_bmbt_irec_t *mval = *map;
4299
ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4300
((mval->br_startoff + mval->br_blockcount) <= end));
4301
ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
4302
(mval->br_startoff < obno));
4304
*bno = mval->br_startoff + mval->br_blockcount;
4306
if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
4307
/* update previous map with new information */
4308
ASSERT(mval->br_startblock == mval[-1].br_startblock);
4309
ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
4310
ASSERT(mval->br_state == mval[-1].br_state);
4311
mval[-1].br_blockcount = mval->br_blockcount;
4312
mval[-1].br_state = mval->br_state;
4313
} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
4314
mval[-1].br_startblock != DELAYSTARTBLOCK &&
4315
mval[-1].br_startblock != HOLESTARTBLOCK &&
4316
mval->br_startblock == mval[-1].br_startblock +
4317
mval[-1].br_blockcount &&
4318
((flags & XFS_BMAPI_IGSTATE) ||
4319
mval[-1].br_state == mval->br_state)) {
4320
ASSERT(mval->br_startoff ==
4321
mval[-1].br_startoff + mval[-1].br_blockcount);
4322
mval[-1].br_blockcount += mval->br_blockcount;
4323
} else if (*n > 0 &&
4324
mval->br_startblock == DELAYSTARTBLOCK &&
4325
mval[-1].br_startblock == DELAYSTARTBLOCK &&
4326
mval->br_startoff ==
4327
mval[-1].br_startoff + mval[-1].br_blockcount) {
4328
mval[-1].br_blockcount += mval->br_blockcount;
4329
mval[-1].br_state = mval->br_state;
4330
} else if (!((*n == 0) &&
4331
((mval->br_startoff + mval->br_blockcount) <=
4340
* Map file blocks to filesystem blocks without allocation.
4344
struct xfs_inode *ip,
4347
struct xfs_bmbt_irec *mval,
4351
struct xfs_mount *mp = ip->i_mount;
4352
struct xfs_ifork *ifp;
4353
struct xfs_bmbt_irec got;
4354
struct xfs_bmbt_irec prev;
4361
int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4362
XFS_ATTR_FORK : XFS_DATA_FORK;
4365
ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
4366
XFS_BMAPI_IGSTATE)));
4368
if (unlikely(XFS_TEST_ERROR(
4369
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4370
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4371
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4372
XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4373
return XFS_ERROR(EFSCORRUPTED);
4376
if (XFS_FORCED_SHUTDOWN(mp))
4377
return XFS_ERROR(EIO);
4379
XFS_STATS_INC(xs_blk_mapr);
4381
ifp = XFS_IFORK_PTR(ip, whichfork);
4382
ASSERT(ifp->if_ext_max ==
4383
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4385
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4386
error = xfs_iread_extents(NULL, ip, whichfork);
4391
xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
4395
while (bno < end && n < *nmap) {
4396
/* Reading past eof, act as though there's a hole up to end. */
4398
got.br_startoff = end;
4399
if (got.br_startoff > bno) {
4400
/* Reading in a hole. */
4401
mval->br_startoff = bno;
4402
mval->br_startblock = HOLESTARTBLOCK;
4403
mval->br_blockcount =
4404
XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4405
mval->br_state = XFS_EXT_NORM;
4406
bno += mval->br_blockcount;
4407
len -= mval->br_blockcount;
4413
/* set up the extent map to return. */
4414
xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4415
xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4417
/* If we're done, stop now. */
4418
if (bno >= end || n >= *nmap)
4421
/* Else go on to the next record. */
4422
if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4423
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4432
xfs_bmapi_reserve_delalloc(
4433
struct xfs_inode *ip,
4436
struct xfs_bmbt_irec *got,
4437
struct xfs_bmbt_irec *prev,
4438
xfs_extnum_t *lastx,
4441
struct xfs_mount *mp = ip->i_mount;
4442
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4444
xfs_extlen_t indlen;
4445
char rt = XFS_IS_REALTIME_INODE(ip);
4449
alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
4451
alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4453
/* Figure out the extent size, adjust alen */
4454
extsz = xfs_get_extsz_hint(ip);
4457
* Make sure we don't exceed a single extent length when we
4458
* align the extent by reducing length we are going to
4459
* allocate by the maximum amount extent size aligment may
4462
alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
4463
error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
4464
1, 0, &aoff, &alen);
4469
extsz = alen / mp->m_sb.sb_rextsize;
4472
* Make a transaction-less quota reservation for delayed allocation
4473
* blocks. This number gets adjusted later. We return if we haven't
4474
* allocated blocks already inside this loop.
4476
error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4477
rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4482
* Split changing sb for alen and indlen since they could be coming
4483
* from different places.
4485
indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4489
error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
4490
-((int64_t)extsz), 0);
4492
error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
4493
-((int64_t)alen), 0);
4497
goto out_unreserve_quota;
4499
error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
4500
-((int64_t)indlen), 0);
4502
goto out_unreserve_blocks;
4505
ip->i_delayed_blks += alen;
4507
got->br_startoff = aoff;
4508
got->br_startblock = nullstartblock(indlen);
4509
got->br_blockcount = alen;
4510
got->br_state = XFS_EXT_NORM;
4511
xfs_bmap_add_extent_hole_delay(ip, lastx, got);
4514
* Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
4515
* might have merged it into one of the neighbouring ones.
4517
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
4519
ASSERT(got->br_startoff <= aoff);
4520
ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
4521
ASSERT(isnullstartblock(got->br_startblock));
4522
ASSERT(got->br_state == XFS_EXT_NORM);
4525
out_unreserve_blocks:
4527
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
4529
xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
4530
out_unreserve_quota:
4531
if (XFS_IS_QUOTA_ON(mp))
4532
xfs_trans_unreserve_quota_nblks(NULL, ip, alen, 0, rt ?
4533
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4538
* Map file blocks to filesystem blocks, adding delayed allocations as needed.
4542
struct xfs_inode *ip, /* incore inode */
4543
xfs_fileoff_t bno, /* starting file offs. mapped */
4544
xfs_filblks_t len, /* length to map in file */
4545
struct xfs_bmbt_irec *mval, /* output: map values */
4546
int *nmap, /* i/o: mval size/count */
4547
int flags) /* XFS_BMAPI_... */
4549
struct xfs_mount *mp = ip->i_mount;
4550
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4551
struct xfs_bmbt_irec got; /* current file extent record */
4552
struct xfs_bmbt_irec prev; /* previous file extent record */
4553
xfs_fileoff_t obno; /* old block number (offset) */
4554
xfs_fileoff_t end; /* end of mapped file region */
4555
xfs_extnum_t lastx; /* last useful extent number */
4556
int eof; /* we've hit the end of extents */
4557
int n = 0; /* current extent index */
4561
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4562
ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
4564
if (unlikely(XFS_TEST_ERROR(
4565
(XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4566
XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4567
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4568
XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
4569
return XFS_ERROR(EFSCORRUPTED);
4572
if (XFS_FORCED_SHUTDOWN(mp))
4573
return XFS_ERROR(EIO);
4575
XFS_STATS_INC(xs_blk_mapw);
4577
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4578
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4583
xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
4587
while (bno < end && n < *nmap) {
4588
if (eof || got.br_startoff > bno) {
4589
error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
4590
&prev, &lastx, eof);
4600
/* set up the extent map to return. */
4601
xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4602
xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4604
/* If we're done, stop now. */
4605
if (bno >= end || n >= *nmap)
4608
/* Else go on to the next record. */
4610
if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4611
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4623
struct xfs_bmalloca *bma,
4626
struct xfs_mount *mp = bma->ip->i_mount;
4627
int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4628
XFS_ATTR_FORK : XFS_DATA_FORK;
4629
struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4630
int tmp_logflags = 0;
4634
ASSERT(bma->length > 0);
4636
rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
4639
* For the wasdelay case, we could also just allocate the stuff asked
4640
* for in this bmap call but that wouldn't be as good.
4643
bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4644
bma->offset = bma->got.br_startoff;
4645
if (bma->idx != NULLEXTNUM && bma->idx) {
4646
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4650
bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4652
bma->length = XFS_FILBLKS_MIN(bma->length,
4653
bma->got.br_startoff - bma->offset);
4657
* Indicate if this is the first user data in the file, or just any
4660
if (!(flags & XFS_BMAPI_METADATA)) {
4661
bma->userdata = (bma->offset == 0) ?
4662
XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
4665
bma->minlen = (flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4668
* Only want to do the alignment at the eof if it is userdata and
4669
* allocation length is larger than a stripe unit.
4671
if (mp->m_dalign && bma->length >= mp->m_dalign &&
4672
!(flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4673
error = xfs_bmap_isaeof(bma, whichfork);
4678
error = xfs_bmap_alloc(bma);
4682
if (bma->flist->xbf_low)
4685
bma->cur->bc_private.b.firstblock = *bma->firstblock;
4686
if (bma->blkno == NULLFSBLOCK)
4688
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4689
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4690
bma->cur->bc_private.b.firstblock = *bma->firstblock;
4691
bma->cur->bc_private.b.flist = bma->flist;
4694
* Bump the number of extents we've allocated
4700
bma->cur->bc_private.b.flags =
4701
bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4703
bma->got.br_startoff = bma->offset;
4704
bma->got.br_startblock = bma->blkno;
4705
bma->got.br_blockcount = bma->length;
4706
bma->got.br_state = XFS_EXT_NORM;
4709
* A wasdelay extent has been initialized, so shouldn't be flagged
4712
if (!bma->wasdel && (flags & XFS_BMAPI_PREALLOC) &&
4713
xfs_sb_version_hasextflgbit(&mp->m_sb))
4714
bma->got.br_state = XFS_EXT_UNWRITTEN;
4717
error = xfs_bmap_add_extent_delay_real(bma);
4719
error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4721
bma->logflags |= tmp_logflags;
4726
* Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4727
* or xfs_bmap_add_extent_hole_real might have merged it into one of
4728
* the neighbouring ones.
4730
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4732
ASSERT(bma->got.br_startoff <= bma->offset);
4733
ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4734
bma->offset + bma->length);
4735
ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4736
bma->got.br_state == XFS_EXT_UNWRITTEN);
4741
xfs_bmapi_convert_unwritten(
4742
struct xfs_bmalloca *bma,
4743
struct xfs_bmbt_irec *mval,
4747
int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4748
XFS_ATTR_FORK : XFS_DATA_FORK;
4749
struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4750
int tmp_logflags = 0;
4753
/* check if we need to do unwritten->real conversion */
4754
if (mval->br_state == XFS_EXT_UNWRITTEN &&
4755
(flags & XFS_BMAPI_PREALLOC))
4758
/* check if we need to do real->unwritten conversion */
4759
if (mval->br_state == XFS_EXT_NORM &&
4760
(flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4761
(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4765
* Modify (by adding) the state flag, if writing.
4767
ASSERT(mval->br_blockcount <= len);
4768
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4769
bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4770
bma->ip, whichfork);
4771
bma->cur->bc_private.b.firstblock = *bma->firstblock;
4772
bma->cur->bc_private.b.flist = bma->flist;
4774
mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4775
? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4777
error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
4778
&bma->cur, mval, bma->firstblock, bma->flist,
4780
bma->logflags |= tmp_logflags;
4785
* Update our extent pointer, given that
4786
* xfs_bmap_add_extent_unwritten_real might have merged it into one
4787
* of the neighbouring ones.
4789
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4792
* We may have combined previously unwritten space with written space,
4793
* so generate another request.
4795
if (mval->br_blockcount < len)
4801
* Map file blocks to filesystem blocks, and allocate blocks or convert the
4802
* extent state if necessary. Details behaviour is controlled by the flags
4803
* parameter. Only allocates blocks from a single allocation group, to avoid
4806
* The returned value in "firstblock" from the first call in a transaction
4807
* must be remembered and presented to subsequent calls in "firstblock".
4808
* An upper bound for the number of blocks to be allocated is supplied to
4809
* the first call in "total"; if no allocation group has that many free
4810
* blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4814
struct xfs_trans *tp, /* transaction pointer */
4815
struct xfs_inode *ip, /* incore inode */
4816
xfs_fileoff_t bno, /* starting file offs. mapped */
4817
xfs_filblks_t len, /* length to map in file */
4818
int flags, /* XFS_BMAPI_... */
4819
xfs_fsblock_t *firstblock, /* first allocated block
4820
controls a.g. for allocs */
4821
xfs_extlen_t total, /* total blocks needed */
4822
struct xfs_bmbt_irec *mval, /* output: map values */
4823
int *nmap, /* i/o: mval size/count */
4824
struct xfs_bmap_free *flist) /* i/o: list extents to free */
4826
struct xfs_mount *mp = ip->i_mount;
4827
struct xfs_ifork *ifp;
4828
struct xfs_bmalloca bma = { 0 }; /* args for xfs_bmap_alloc */
4829
xfs_fileoff_t end; /* end of mapped file region */
4830
int eof; /* after the end of extents */
4831
int error; /* error return */
4832
int n; /* current extent index */
4833
xfs_fileoff_t obno; /* old block number (offset) */
4834
int whichfork; /* data or attr fork */
4835
char inhole; /* current location is hole in file */
4836
char wasdelay; /* old extent was delayed */
4839
xfs_fileoff_t orig_bno; /* original block number value */
4840
int orig_flags; /* original flags arg value */
4841
xfs_filblks_t orig_len; /* original value of len arg */
4842
struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4843
int orig_nmap; /* original value of *nmap */
4853
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4854
ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4858
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4859
XFS_ATTR_FORK : XFS_DATA_FORK;
4861
if (unlikely(XFS_TEST_ERROR(
4862
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4863
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4864
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
4865
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4866
XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4867
return XFS_ERROR(EFSCORRUPTED);
4870
if (XFS_FORCED_SHUTDOWN(mp))
4871
return XFS_ERROR(EIO);
4873
ifp = XFS_IFORK_PTR(ip, whichfork);
4874
ASSERT(ifp->if_ext_max ==
4875
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4877
XFS_STATS_INC(xs_blk_mapw);
4879
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4880
error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
4881
&bma.logflags, whichfork);
4886
if (*firstblock == NULLFSBLOCK) {
4887
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4888
bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4895
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4896
error = xfs_iread_extents(tp, ip, whichfork);
4901
xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
4912
bma.firstblock = firstblock;
4914
while (bno < end && n < *nmap) {
4915
inhole = eof || bma.got.br_startoff > bno;
4916
wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
4919
* First, deal with the hole before the allocated space
4920
* that we found, if any.
4922
if (inhole || wasdelay) {
4924
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4925
bma.wasdel = wasdelay;
4929
* There's a 32/64 bit type mismatch between the
4930
* allocation length request (which can be 64 bits in
4931
* length) and the bma length request, which is
4932
* xfs_extlen_t and therefore 32 bits. Hence we have to
4933
* check for 32-bit overflows and handle them here.
4935
if (len > (xfs_filblks_t)MAXEXTLEN)
4936
bma.length = MAXEXTLEN;
4941
ASSERT(bma.length > 0);
4942
error = xfs_bmapi_allocate(&bma, flags);
4945
if (bma.blkno == NULLFSBLOCK)
4949
/* Deal with the allocated space we found. */
4950
xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4953
/* Execute unwritten extent conversion if necessary */
4954
error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4955
if (error == EAGAIN)
4960
/* update the extent map to return */
4961
xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4964
* If we're done, stop now. Stop when we've allocated
4965
* XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4966
* the transaction may get too big.
4968
if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4971
/* Else go on to the next record. */
4973
if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
4974
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
4982
* Transform from btree to extents, give it cur.
4984
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
4985
XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
4986
int tmp_logflags = 0;
4989
error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4990
&tmp_logflags, whichfork);
4991
bma.logflags |= tmp_logflags;
4995
ASSERT(ifp->if_ext_max ==
4996
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4997
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4998
XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5002
* Log everything. Do this after conversion, there's no point in
5003
* logging the extent records if we've converted to btree format.
5005
if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
5006
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5007
bma.logflags &= ~xfs_ilog_fext(whichfork);
5008
else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
5009
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5010
bma.logflags &= ~xfs_ilog_fbroot(whichfork);
5012
* Log whatever the flags say, even if error. Otherwise we might miss
5013
* detecting a case where the data is changed, there's an error,
5014
* and it's not logged so we don't shutdown when we should.
5017
xfs_trans_log_inode(tp, ip, bma.logflags);
5021
ASSERT(*firstblock == NULLFSBLOCK ||
5022
XFS_FSB_TO_AGNO(mp, *firstblock) ==
5024
bma.cur->bc_private.b.firstblock) ||
5026
XFS_FSB_TO_AGNO(mp, *firstblock) <
5028
bma.cur->bc_private.b.firstblock)));
5029
*firstblock = bma.cur->bc_private.b.firstblock;
5031
xfs_btree_del_cursor(bma.cur,
5032
error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5035
xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
5041
* Unmap (remove) blocks from a file.
5042
* If nexts is nonzero then the number of extents to remove is limited to
5043
* that value. If not all extents in the block range can be removed then
5048
xfs_trans_t *tp, /* transaction pointer */
5049
struct xfs_inode *ip, /* incore inode */
5050
xfs_fileoff_t bno, /* starting offset to unmap */
5051
xfs_filblks_t len, /* length to unmap in file */
5052
int flags, /* misc flags */
5053
xfs_extnum_t nexts, /* number of extents max */
5054
xfs_fsblock_t *firstblock, /* first allocated block
5055
controls a.g. for allocs */
5056
xfs_bmap_free_t *flist, /* i/o: list extents to free */
5057
int *done) /* set if not done yet */
5059
xfs_btree_cur_t *cur; /* bmap btree cursor */
5060
xfs_bmbt_irec_t del; /* extent being deleted */
5061
int eof; /* is deleting at eof */
5062
xfs_bmbt_rec_host_t *ep; /* extent record pointer */
5063
int error; /* error return value */
5064
xfs_extnum_t extno; /* extent number in list */
5065
xfs_bmbt_irec_t got; /* current extent record */
5066
xfs_ifork_t *ifp; /* inode fork pointer */
5067
int isrt; /* freeing in rt area */
5068
xfs_extnum_t lastx; /* last extent index used */
5069
int logflags; /* transaction logging flags */
5070
xfs_extlen_t mod; /* rt extent offset */
5071
xfs_mount_t *mp; /* mount structure */
5072
xfs_extnum_t nextents; /* number of file extents */
5073
xfs_bmbt_irec_t prev; /* previous extent record */
5074
xfs_fileoff_t start; /* first file offset deleted */
5075
int tmp_logflags; /* partial logging flags */
5076
int wasdel; /* was a delayed alloc extent */
5077
int whichfork; /* data or attribute fork */
5080
trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5082
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5083
XFS_ATTR_FORK : XFS_DATA_FORK;
5084
ifp = XFS_IFORK_PTR(ip, whichfork);
5086
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5087
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5088
XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5090
return XFS_ERROR(EFSCORRUPTED);
5093
if (XFS_FORCED_SHUTDOWN(mp))
5094
return XFS_ERROR(EIO);
5098
ASSERT(ifp->if_ext_max ==
5099
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5100
if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5101
(error = xfs_iread_extents(tp, ip, whichfork)))
5103
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5104
if (nextents == 0) {
5108
XFS_STATS_INC(xs_blk_unmap);
5109
isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5111
bno = start + len - 1;
5112
ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5116
* Check to see if the given block number is past the end of the
5117
* file, back up to the last block if so...
5120
ep = xfs_iext_get_ext(ifp, --lastx);
5121
xfs_bmbt_get_all(ep, &got);
5122
bno = got.br_startoff + got.br_blockcount - 1;
5125
if (ifp->if_flags & XFS_IFBROOT) {
5126
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5127
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5128
cur->bc_private.b.firstblock = *firstblock;
5129
cur->bc_private.b.flist = flist;
5130
cur->bc_private.b.flags = 0;
5134
while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5135
(nexts == 0 || extno < nexts)) {
5137
* Is the found extent after a hole in which bno lives?
5138
* Just back up to the previous extent, if so.
5140
if (got.br_startoff > bno) {
5143
ep = xfs_iext_get_ext(ifp, lastx);
5144
xfs_bmbt_get_all(ep, &got);
5147
* Is the last block of this extent before the range
5148
* we're supposed to delete? If so, we're done.
5150
bno = XFS_FILEOFF_MIN(bno,
5151
got.br_startoff + got.br_blockcount - 1);
5155
* Then deal with the (possibly delayed) allocated space
5160
wasdel = isnullstartblock(del.br_startblock);
5161
if (got.br_startoff < start) {
5162
del.br_startoff = start;
5163
del.br_blockcount -= start - got.br_startoff;
5165
del.br_startblock += start - got.br_startoff;
5167
if (del.br_startoff + del.br_blockcount > bno + 1)
5168
del.br_blockcount = bno + 1 - del.br_startoff;
5169
sum = del.br_startblock + del.br_blockcount;
5171
(mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5173
* Realtime extent not lined up at the end.
5174
* The extent could have been split into written
5175
* and unwritten pieces, or we could just be
5176
* unmapping part of it. But we can't really
5177
* get rid of part of a realtime extent.
5179
if (del.br_state == XFS_EXT_UNWRITTEN ||
5180
!xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5182
* This piece is unwritten, or we're not
5183
* using unwritten extents. Skip over it.
5186
bno -= mod > del.br_blockcount ?
5187
del.br_blockcount : mod;
5188
if (bno < got.br_startoff) {
5190
xfs_bmbt_get_all(xfs_iext_get_ext(
5196
* It's written, turn it unwritten.
5197
* This is better than zeroing it.
5199
ASSERT(del.br_state == XFS_EXT_NORM);
5200
ASSERT(xfs_trans_get_block_res(tp) > 0);
5202
* If this spans a realtime extent boundary,
5203
* chop it back to the start of the one we end at.
5205
if (del.br_blockcount > mod) {
5206
del.br_startoff += del.br_blockcount - mod;
5207
del.br_startblock += del.br_blockcount - mod;
5208
del.br_blockcount = mod;
5210
del.br_state = XFS_EXT_UNWRITTEN;
5211
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5212
&lastx, &cur, &del, firstblock, flist,
5218
if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5220
* Realtime extent is lined up at the end but not
5221
* at the front. We'll get rid of full extents if
5224
mod = mp->m_sb.sb_rextsize - mod;
5225
if (del.br_blockcount > mod) {
5226
del.br_blockcount -= mod;
5227
del.br_startoff += mod;
5228
del.br_startblock += mod;
5229
} else if ((del.br_startoff == start &&
5230
(del.br_state == XFS_EXT_UNWRITTEN ||
5231
xfs_trans_get_block_res(tp) == 0)) ||
5232
!xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5234
* Can't make it unwritten. There isn't
5235
* a full extent here so just skip it.
5237
ASSERT(bno >= del.br_blockcount);
5238
bno -= del.br_blockcount;
5239
if (got.br_startoff > bno) {
5241
ep = xfs_iext_get_ext(ifp,
5243
xfs_bmbt_get_all(ep, &got);
5247
} else if (del.br_state == XFS_EXT_UNWRITTEN) {
5249
* This one is already unwritten.
5250
* It must have a written left neighbor.
5251
* Unwrite the killed part of that one and
5255
xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5257
ASSERT(prev.br_state == XFS_EXT_NORM);
5258
ASSERT(!isnullstartblock(prev.br_startblock));
5259
ASSERT(del.br_startblock ==
5260
prev.br_startblock + prev.br_blockcount);
5261
if (prev.br_startoff < start) {
5262
mod = start - prev.br_startoff;
5263
prev.br_blockcount -= mod;
5264
prev.br_startblock += mod;
5265
prev.br_startoff = start;
5267
prev.br_state = XFS_EXT_UNWRITTEN;
5269
error = xfs_bmap_add_extent_unwritten_real(tp,
5270
ip, &lastx, &cur, &prev,
5271
firstblock, flist, &logflags);
5276
ASSERT(del.br_state == XFS_EXT_NORM);
5277
del.br_state = XFS_EXT_UNWRITTEN;
5278
error = xfs_bmap_add_extent_unwritten_real(tp,
5279
ip, &lastx, &cur, &del,
5280
firstblock, flist, &logflags);
5287
ASSERT(startblockval(del.br_startblock) > 0);
5288
/* Update realtime/data freespace, unreserve quota */
5290
xfs_filblks_t rtexts;
5292
rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5293
do_div(rtexts, mp->m_sb.sb_rextsize);
5294
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5295
(int64_t)rtexts, 0);
5296
(void)xfs_trans_reserve_quota_nblks(NULL,
5297
ip, -((long)del.br_blockcount), 0,
5298
XFS_QMOPT_RES_RTBLKS);
5300
xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
5301
(int64_t)del.br_blockcount, 0);
5302
(void)xfs_trans_reserve_quota_nblks(NULL,
5303
ip, -((long)del.br_blockcount), 0,
5304
XFS_QMOPT_RES_REGBLKS);
5306
ip->i_delayed_blks -= del.br_blockcount;
5308
cur->bc_private.b.flags |=
5309
XFS_BTCUR_BPRV_WASDEL;
5311
cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5313
* If it's the case where the directory code is running
5314
* with no block reservation, and the deleted block is in
5315
* the middle of its extent, and the resulting insert
5316
* of an extent would cause transformation to btree format,
5317
* then reject it. The calling code will then swap
5318
* blocks around instead.
5319
* We have to do this now, rather than waiting for the
5320
* conversion to btree format, since the transaction
5323
if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5324
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5325
XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
5326
del.br_startoff > got.br_startoff &&
5327
del.br_startoff + del.br_blockcount <
5328
got.br_startoff + got.br_blockcount) {
5329
error = XFS_ERROR(ENOSPC);
5332
error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
5333
&tmp_logflags, whichfork);
5334
logflags |= tmp_logflags;
5337
bno = del.br_startoff - 1;
5340
* If not done go on to the next (previous) record.
5342
if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5344
ep = xfs_iext_get_ext(ifp, lastx);
5345
if (xfs_bmbt_get_startoff(ep) > bno) {
5347
ep = xfs_iext_get_ext(ifp,
5350
xfs_bmbt_get_all(ep, &got);
5355
*done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5356
ASSERT(ifp->if_ext_max ==
5357
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5359
* Convert to a btree if necessary.
5361
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5362
XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
5363
ASSERT(cur == NULL);
5364
error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5365
&cur, 0, &tmp_logflags, whichfork);
5366
logflags |= tmp_logflags;
5371
* transform from btree to extents, give it cur
5373
else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5374
XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5375
ASSERT(cur != NULL);
5376
error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5378
logflags |= tmp_logflags;
5383
* transform from extents to local?
5385
ASSERT(ifp->if_ext_max ==
5386
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5390
* Log everything. Do this after conversion, there's no point in
5391
* logging the extent records if we've converted to btree format.
5393
if ((logflags & xfs_ilog_fext(whichfork)) &&
5394
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5395
logflags &= ~xfs_ilog_fext(whichfork);
5396
else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5397
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5398
logflags &= ~xfs_ilog_fbroot(whichfork);
5400
* Log inode even in the error case, if the transaction
5401
* is dirty we'll need to shut down the filesystem.
5404
xfs_trans_log_inode(tp, ip, logflags);
5407
*firstblock = cur->bc_private.b.firstblock;
5408
cur->bc_private.b.allocated = 0;
5410
xfs_btree_del_cursor(cur,
5411
error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5417
* returns 1 for success, 0 if we failed to map the extent.
5420
xfs_getbmapx_fix_eof_hole(
5421
xfs_inode_t *ip, /* xfs incore inode pointer */
5422
struct getbmapx *out, /* output structure */
5423
int prealloced, /* this is a file with
5424
* preallocated data space */
5425
__int64_t end, /* last block requested */
5426
xfs_fsblock_t startblock)
5429
xfs_mount_t *mp; /* file system mount point */
5430
xfs_ifork_t *ifp; /* inode fork pointer */
5431
xfs_extnum_t lastx; /* last extent pointer */
5432
xfs_fileoff_t fileblock;
5434
if (startblock == HOLESTARTBLOCK) {
5436
out->bmv_block = -1;
5437
fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, ip->i_size));
5438
fixlen -= out->bmv_offset;
5439
if (prealloced && out->bmv_offset + out->bmv_length == end) {
5440
/* Came to hole at EOF. Trim it. */
5443
out->bmv_length = fixlen;
5446
if (startblock == DELAYSTARTBLOCK)
5447
out->bmv_block = -2;
5449
out->bmv_block = xfs_fsb_to_db(ip, startblock);
5450
fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
5451
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
5452
if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
5453
(lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
5454
out->bmv_oflags |= BMV_OF_LAST;
5461
* Get inode's extents as described in bmv, and format for output.
5462
* Calls formatter to fill the user's buffer until all extents
5463
* are mapped, until the passed-in bmv->bmv_count slots have
5464
* been filled, or until the formatter short-circuits the loop,
5465
* if it is tracking filled-in extents on its own.
5467
int /* error code */
5470
struct getbmapx *bmv, /* user bmap structure */
5471
xfs_bmap_format_t formatter, /* format to user */
5472
void *arg) /* formatter arg */
5474
__int64_t bmvend; /* last block requested */
5475
int error = 0; /* return value */
5476
__int64_t fixlen; /* length for -1 case */
5477
int i; /* extent number */
5478
int lock; /* lock state */
5479
xfs_bmbt_irec_t *map; /* buffer for user's data */
5480
xfs_mount_t *mp; /* file system mount point */
5481
int nex; /* # of user extents can do */
5482
int nexleft; /* # of user extents left */
5483
int subnex; /* # of bmapi's can do */
5484
int nmap; /* number of map entries */
5485
struct getbmapx *out; /* output structure */
5486
int whichfork; /* data or attr fork */
5487
int prealloced; /* this is a file with
5488
* preallocated data space */
5489
int iflags; /* interface flags */
5490
int bmapi_flags; /* flags for xfs_bmapi */
5494
iflags = bmv->bmv_iflags;
5495
whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5497
if (whichfork == XFS_ATTR_FORK) {
5498
if (XFS_IFORK_Q(ip)) {
5499
if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5500
ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5501
ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5502
return XFS_ERROR(EINVAL);
5503
} else if (unlikely(
5504
ip->i_d.di_aformat != 0 &&
5505
ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5506
XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5508
return XFS_ERROR(EFSCORRUPTED);
5514
if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5515
ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5516
ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5517
return XFS_ERROR(EINVAL);
5519
if (xfs_get_extsz_hint(ip) ||
5520
ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5522
fixlen = XFS_MAXIOFFSET(mp);
5525
fixlen = ip->i_size;
5529
if (bmv->bmv_length == -1) {
5530
fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5532
max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
5533
} else if (bmv->bmv_length == 0) {
5534
bmv->bmv_entries = 0;
5536
} else if (bmv->bmv_length < 0) {
5537
return XFS_ERROR(EINVAL);
5540
nex = bmv->bmv_count - 1;
5542
return XFS_ERROR(EINVAL);
5543
bmvend = bmv->bmv_offset + bmv->bmv_length;
5546
if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
5547
return XFS_ERROR(ENOMEM);
5548
out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
5550
return XFS_ERROR(ENOMEM);
5552
xfs_ilock(ip, XFS_IOLOCK_SHARED);
5553
if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
5554
if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
5555
error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
5557
goto out_unlock_iolock;
5560
* even after flushing the inode, there can still be delalloc
5561
* blocks on the inode beyond EOF due to speculative
5562
* preallocation. These are not removed until the release
5563
* function is called or the inode is inactivated. Hence we
5564
* cannot assert here that ip->i_delayed_blks == 0.
5568
lock = xfs_ilock_map_shared(ip);
5571
* Don't let nex be bigger than the number of extents
5572
* we can have assuming alternating holes and real extents.
5574
if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5575
nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5577
bmapi_flags = xfs_bmapi_aflag(whichfork);
5578
if (!(iflags & BMV_IF_PREALLOC))
5579
bmapi_flags |= XFS_BMAPI_IGSTATE;
5582
* Allocate enough space to handle "subnex" maps at a time.
5586
map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
5588
goto out_unlock_ilock;
5590
bmv->bmv_entries = 0;
5592
if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
5593
(whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
5601
nmap = (nexleft > subnex) ? subnex : nexleft;
5602
error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5603
XFS_BB_TO_FSB(mp, bmv->bmv_length),
5604
map, &nmap, bmapi_flags);
5607
ASSERT(nmap <= subnex);
5609
for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5610
out[cur_ext].bmv_oflags = 0;
5611
if (map[i].br_state == XFS_EXT_UNWRITTEN)
5612
out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
5613
else if (map[i].br_startblock == DELAYSTARTBLOCK)
5614
out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
5615
out[cur_ext].bmv_offset =
5616
XFS_FSB_TO_BB(mp, map[i].br_startoff);
5617
out[cur_ext].bmv_length =
5618
XFS_FSB_TO_BB(mp, map[i].br_blockcount);
5619
out[cur_ext].bmv_unused1 = 0;
5620
out[cur_ext].bmv_unused2 = 0;
5621
ASSERT(((iflags & BMV_IF_DELALLOC) != 0) ||
5622
(map[i].br_startblock != DELAYSTARTBLOCK));
5623
if (map[i].br_startblock == HOLESTARTBLOCK &&
5624
whichfork == XFS_ATTR_FORK) {
5625
/* came to the end of attribute fork */
5626
out[cur_ext].bmv_oflags |= BMV_OF_LAST;
5630
if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
5632
map[i].br_startblock))
5636
out[cur_ext].bmv_offset +
5637
out[cur_ext].bmv_length;
5639
max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
5642
* In case we don't want to return the hole,
5643
* don't increase cur_ext so that we can reuse
5644
* it in the next loop.
5646
if ((iflags & BMV_IF_NO_HOLES) &&
5647
map[i].br_startblock == HOLESTARTBLOCK) {
5648
memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
5656
} while (nmap && nexleft && bmv->bmv_length);
5661
xfs_iunlock_map_shared(ip, lock);
5663
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
5665
for (i = 0; i < cur_ext; i++) {
5666
int full = 0; /* user array is full */
5668
/* format results & advance arg */
5669
error = formatter(&arg, &out[i], &full);
5679
STATIC struct xfs_buf *
5681
struct xfs_btree_cur *cur,
5684
struct xfs_log_item_desc *lidp;
5690
for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
5691
if (!cur->bc_bufs[i])
5693
if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
5694
return cur->bc_bufs[i];
5697
/* Chase down all the log items to see if the bp is there */
5698
list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
5699
struct xfs_buf_log_item *bip;
5700
bip = (struct xfs_buf_log_item *)lidp->lid_item;
5701
if (bip->bli_item.li_type == XFS_LI_BUF &&
5702
XFS_BUF_ADDR(bip->bli_buf) == bno)
5703
return bip->bli_buf;
5711
/*
 * NOTE(review): fragment of a debug block-sanity checker — the signature
 * line is missing from this corrupted chunk (presumably xfs_check_block,
 * the name used by the callers visible below; confirm against full source).
 * Asserts that keys in a non-leaf bmap btree block appear in increasing
 * br_startoff order, then compares every pair of child block pointers and
 * panics if two children share the same block number.
 */
struct xfs_btree_block *block,
5717
__be64 *pp, *thispa; /* pointer to block address */
5718
xfs_bmbt_key_t *prevp, *keyp;
5720
/* Only meaningful for interior (non-leaf) blocks. */
ASSERT(be16_to_cpu(block->bb_level) > 0);
5723
for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
5724
dmxr = mp->m_bmap_dmxr[0];
5725
keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
5728
/* Keys must be strictly increasing across the block. */
ASSERT(be64_to_cpu(prevp->br_startoff) <
5729
be64_to_cpu(keyp->br_startoff));
5734
* Compare the block numbers to see if there are dups.
5737
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
5739
pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
5741
/* O(n^2) duplicate scan — acceptable; this is debug-only checking. */
for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
5743
thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
5745
thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
5746
if (*thispa == *pp) {
5747
xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
5749
(unsigned long long)be64_to_cpu(*thispa));
5750
panic("%s: ptrs are equal in node\n",
5758
/*
 * NOTE(review): corrupted chunk — stray interleaved line numbers and
 * missing source lines throughout; code is left byte-identical, comments
 * only.  Debug-only walker for a BTREE-format fork: it descends the
 * leftmost path from the incore root (sanity-checking each interior block
 * via xfs_check_block), then walks the leaf blocks left-to-right asserting
 * that extents are sorted and non-overlapping both within each leaf and
 * across leaf boundaries.  On detected corruption it warns and panics via
 * the error0 path at the bottom.
 */
* Check that the extents for the inode ip are in the right order in all
5763
xfs_bmap_check_leaf_extents(
5764
xfs_btree_cur_t *cur, /* btree cursor or null */
5765
xfs_inode_t *ip, /* incore inode pointer */
5766
int whichfork) /* data or attr fork */
5768
struct xfs_btree_block *block; /* current btree block */
5769
xfs_fsblock_t bno; /* block # of "block" */
5770
xfs_buf_t *bp; /* buffer for "block" */
5771
int error; /* error return value */
5772
xfs_extnum_t i=0, j; /* index into the extents list */
5773
xfs_ifork_t *ifp; /* fork structure */
5774
int level; /* btree level, for checking */
5775
xfs_mount_t *mp; /* file system mount structure */
5776
__be64 *pp; /* pointer to block address */
5777
xfs_bmbt_rec_t *ep; /* pointer to current extent */
5778
xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
5779
xfs_bmbt_rec_t *nextp; /* pointer to next extent */
5782
/* Nothing to check unless the fork is in btree format. */
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
5788
ifp = XFS_IFORK_PTR(ip, whichfork);
5789
block = ifp->if_broot;
5791
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
5793
level = be16_to_cpu(block->bb_level);
5795
xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
5796
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
5797
bno = be64_to_cpu(*pp);
5799
ASSERT(bno != NULLDFSBNO);
5800
ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
5801
ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
5804
* Go down the tree until leaf level is reached, following the first
5805
* pointer (leftmost) at each level.
5807
while (level-- > 0) {
5808
/* See if buf is in cur first */
5809
bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
5815
/* Not cached in the cursor: read it (no transaction, debug path). */
if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
5816
XFS_BMAP_BTREE_REF)))
5818
block = XFS_BUF_TO_BLOCK(bp);
5819
XFS_WANT_CORRUPTED_GOTO(
5820
xfs_bmap_sanity_check(mp, bp, level),
5826
* Check this block for basic sanity (increasing keys and
5827
* no duplicate blocks).
5830
xfs_check_block(block, mp, 0, 0);
5831
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
5832
bno = be64_to_cpu(*pp);
5833
XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
5836
xfs_trans_brelse(NULL, bp);
5841
* Here with bp and block set to the leftmost leaf node in the tree.
5846
* Loop over all leaf nodes checking that all extents are in the right order.
5849
xfs_fsblock_t nextbno;
5850
xfs_extnum_t num_recs;
5853
num_recs = xfs_btree_get_numrecs(block);
5856
* Read-ahead the next leaf block, if any.
5859
nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
5862
* Check all the extents to make sure they are OK.
5863
* If we had a previous block, the last entry should
5864
* conform with the first entry in this one.
5867
ep = XFS_BMBT_REC_ADDR(mp, block, 1);
5869
/* Previous leaf's last extent must end at or before this leaf's first. */
ASSERT(xfs_bmbt_disk_get_startoff(&last) +
5870
xfs_bmbt_disk_get_blockcount(&last) <=
5871
xfs_bmbt_disk_get_startoff(ep));
5873
for (j = 1; j < num_recs; j++) {
5874
nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
5875
/* Adjacent extents within the leaf must not overlap. */
ASSERT(xfs_bmbt_disk_get_startoff(ep) +
5876
xfs_bmbt_disk_get_blockcount(ep) <=
5877
xfs_bmbt_disk_get_startoff(nextp));
5885
xfs_trans_brelse(NULL, bp);
5889
* If we've reached the end, stop.
5891
if (bno == NULLFSBLOCK)
5894
bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
5900
if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
5901
XFS_BMAP_BTREE_REF)))
5903
block = XFS_BUF_TO_BLOCK(bp);
5907
xfs_trans_brelse(NULL, bp);
5912
/* error0: corruption detected while walking — warn, release, panic below. */
xfs_warn(mp, "%s: at error0", __func__);
5914
xfs_trans_brelse(NULL, bp);
5916
xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
5918
panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
5924
/*
 * NOTE(review): corrupted chunk (interleaved line numbers, missing lines);
 * code left byte-identical, comments only.
 *
 * Count the filesystem blocks of the given fork into *count.  For an
 * EXTENTS-format fork the in-core extent list is summed directly via
 * xfs_bmap_count_leaves(); otherwise the on-disk bmap btree is walked with
 * xfs_bmap_count_tree(), and EFSCORRUPTED is returned if that walk fails.
 */
* Count fsblocks of the given fork.
5927
xfs_bmap_count_blocks(
5928
xfs_trans_t *tp, /* transaction pointer */
5929
xfs_inode_t *ip, /* incore inode */
5930
int whichfork, /* data or attr fork */
5931
int *count) /* out: count of blocks */
5933
struct xfs_btree_block *block; /* current btree block */
5934
xfs_fsblock_t bno; /* block # of "block" */
5935
xfs_ifork_t *ifp; /* fork structure */
5936
int level; /* btree level, for checking */
5937
xfs_mount_t *mp; /* file system mount structure */
5938
__be64 *pp; /* pointer to block address */
5942
ifp = XFS_IFORK_PTR(ip, whichfork);
5943
/* Extent-format fork: sum the in-core extent records directly. */
if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
5944
xfs_bmap_count_leaves(ifp, 0,
5945
ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
5951
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
5953
block = ifp->if_broot;
5954
level = be16_to_cpu(block->bb_level);
5956
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
5957
bno = be64_to_cpu(*pp);
5958
ASSERT(bno != NULLDFSBNO);
5959
ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
5960
ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
5962
/* Walk the on-disk btree; a negative return means corruption. */
if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
5963
XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
5965
return XFS_ERROR(EFSCORRUPTED);
5972
/*
 * NOTE(review): corrupted chunk (interleaved line numbers, missing lines);
 * code left byte-identical, comments only.  Recursive helper for
 * xfs_bmap_count_blocks(): at interior levels it counts the sibling chain
 * at this level and recurses down the leftmost child; at the level above
 * the leaves it sums the records of every leaf via
 * xfs_bmap_disk_count_leaves(), following bb_rightsib across the level.
 */
* Recursively walks each level of a btree
5973
* to count total fsblocks in use.
5975
STATIC int /* error */
5976
xfs_bmap_count_tree(
5977
xfs_mount_t *mp, /* file system mount point */
5978
xfs_trans_t *tp, /* transaction pointer */
5979
xfs_ifork_t *ifp, /* inode fork pointer */
5980
xfs_fsblock_t blockno, /* file system block number */
5981
int levelin, /* level in btree */
5982
int *count) /* Count of blocks */
5985
xfs_buf_t *bp, *nbp;
5986
int level = levelin;
5988
xfs_fsblock_t bno = blockno;
5989
xfs_fsblock_t nextbno;
5990
struct xfs_btree_block *block, *nextblock;
5993
if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
5996
block = XFS_BUF_TO_BLOCK(bp);
5999
/* Not at node above leaves, count this level of nodes */
6000
nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6001
/* Count every right sibling at this interior level. */
while (nextbno != NULLFSBLOCK) {
6002
if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
6003
0, &nbp, XFS_BMAP_BTREE_REF)))
6006
nextblock = XFS_BUF_TO_BLOCK(nbp);
6007
nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
6008
xfs_trans_brelse(tp, nbp);
6011
/* Dive to the next level */
6012
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
6013
bno = be64_to_cpu(*pp);
6014
if (unlikely((error =
6015
xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
6016
xfs_trans_brelse(tp, bp);
6017
XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
6018
XFS_ERRLEVEL_LOW, mp);
6019
return XFS_ERROR(EFSCORRUPTED);
6021
xfs_trans_brelse(tp, bp);
6023
/* count all level 1 nodes and their leaves */
6025
nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6026
numrecs = be16_to_cpu(block->bb_numrecs);
6027
xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
6028
xfs_trans_brelse(tp, bp);
6029
/* End of the sibling chain at leaf-parent level: done. */
if (nextbno == NULLFSBLOCK)
6032
if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
6033
XFS_BMAP_BTREE_REF)))
6036
block = XFS_BUF_TO_BLOCK(bp);
6043
/*
 * NOTE(review): the parameter lines of this function are missing from the
 * corrupted chunk — from the loop body it presumably takes (ifp, idx,
 * numrecs, count); confirm against the full source.  Sums the block counts
 * of numrecs in-core extent records starting at index idx into *count.
 */
* Count leaf blocks given a range of extent records.
6046
xfs_bmap_count_leaves(
6054
for (b = 0; b < numrecs; b++) {
6055
xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
6056
*count += xfs_bmbt_get_blockcount(frp);
6061
/*
 * NOTE(review): trailing parameter lines and function tail are missing
 * from the corrupted chunk; code left byte-identical.  On-disk counterpart
 * of xfs_bmap_count_leaves(): sums the block counts of numrecs on-disk
 * bmbt records in `block` into *count (records are 1-indexed on disk).
 */
* Count leaf blocks given a range of extent records originally
6065
xfs_bmap_disk_count_leaves(
6066
struct xfs_mount *mp,
6067
struct xfs_btree_block *block,
6072
xfs_bmbt_rec_t *frp;
6074
for (b = 1; b <= numrecs; b++) {
6075
frp = XFS_BMBT_REC_ADDR(mp, block, b);
6076
*count += xfs_bmbt_disk_get_blockcount(frp);
6081
/*
 * NOTE(review): corrupted chunk (interleaved line numbers, missing lines;
 * the closing brace is past the visible region); code left byte-identical,
 * comments only.  Punches out delalloc blocks one filesystem block per
 * do/while iteration: each pass maps one block with xfs_bmapi_read(),
 * skips it if it is no longer delalloc, and otherwise removes it with a
 * transactionless xfs_bunmapi() call.  Caller must hold ILOCK_EXCL.
 */
* dead simple method of punching delayed allocation blocks from a range in
6082
* the inode. Walks a block at a time so will be slow, but is only executed in
6083
* rare error cases so the overhead is not critical. This will always punch out
6084
* both the start and end blocks, even if the ranges only partially overlap
6085
* them, so it is up to the caller to ensure that partial blocks are not
6089
xfs_bmap_punch_delalloc_range(
6090
struct xfs_inode *ip,
6091
xfs_fileoff_t start_fsb,
6092
xfs_fileoff_t length)
6094
xfs_fileoff_t remaining = length;
6097
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6101
xfs_bmbt_irec_t imap;
6103
xfs_fsblock_t firstblock;
6104
xfs_bmap_free_t flist;
6107
* Map the range first and check that it is a delalloc extent
6108
* before trying to unmap the range. Otherwise we will be
6109
* trying to remove a real extent (which requires a
6110
* transaction) or a hole, which is probably a bad idea...
6112
error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
6116
/* something screwed, just bail */
6117
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
6118
xfs_alert(ip->i_mount,
6119
"Failed delalloc mapping lookup ino %lld fsb %lld.",
6120
ip->i_ino, start_fsb);
6128
/* Block was already converted to a real extent: nothing to punch. */
if (imap.br_startblock != DELAYSTARTBLOCK) {
6129
/* been converted, ignore */
6132
WARN_ON(imap.br_blockcount == 0);
6135
* Note: while we initialise the firstblock/flist pair, they
6136
* should never be used because blocks should never be
6137
* allocated or freed for a delalloc extent and hence we need
6138
* not cancel or finish them after the xfs_bunmapi() call.
6140
xfs_bmap_init(&flist, &firstblock);
6141
error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
6146
/* Punching delalloc must never have queued real-block frees. */
ASSERT(!flist.xbf_count && !flist.xbf_first);
6150
} while(remaining > 0);