165
/* Do a sync so that bulkstat will pick up inode changes
 * that are currently in the inode cache. This is necessary
 * for incremental dumps in order to have the dump time
 * accurately reflect which inodes were included in this dump.
 */
165
173
/* copy stat ptrs
167
175
inomap_statphasep = statphasep;
1118
1131
return ( addrp->hnkoff * SEGPERHNK ) + addrp->segoff;
1134
static inline intgen_t
1135
inomap_lastseg( intgen_t hnkoff )
1137
if ( hnkoff == inomap.lastseg.hnkoff )
1138
return inomap.lastseg.segoff;
1140
return SEGPERHNK - 1;
1121
1143
/* called for every inode group in the filesystem in increasing inode
1122
1144
* order. adds a new segment to the inomap and ino-to-gen map.
1239
1267
intgen_t upper;
1241
1269
if ( !inomap_validaddr( addrp ) ) {
1242
memset( addrp, 0, sizeof(seg_addr_t) );
1270
inomap_reset_context( addrp );
1245
1273
if ( !inomap_find_hnk( addrp, ino ) )
1248
1276
/* find the correct segment */
1250
upper = ( addrp->hnkoff == inomap.lastseg.hnkoff ) ?
1251
inomap.lastseg.segoff : SEGPERHNK - 1;
1278
upper = inomap_lastseg(addrp->hnkoff);
1253
1280
while ( upper >= lower ) {
1254
1281
segp = inomap_addr2seg( addrp );
1267
1294
return BOOL_FALSE;
1298
inomap_iter( void *contextp, intgen_t statemask )
1300
xfs_ino_t ino, endino;
1302
seg_addr_t *addrp = (seg_addr_t *)contextp;
1305
addrp->hnkoff <= inomap.lastseg.hnkoff;
1306
addrp->hnkoff++, addrp->segoff = 0, addrp->inooff = 0 ) {
1309
addrp->segoff <= inomap_lastseg(addrp->hnkoff);
1310
addrp->segoff++, addrp->inooff = 0 ) {
1312
segp = inomap_addr2seg( addrp );
1314
ino = segp->base + addrp->inooff;
1315
endino = segp->base + INOPERSEG;
1316
for ( ; ino < endino ; ino++, addrp->inooff++ ) {
1318
st = SEG_GET_BITS( segp, ino );
1319
if ( statemask & ( 1 << st )) {
1320
addrp->inooff++; /* for next call */
1331
inomap_next_nondir(void *contextp, xfs_ino_t lastino)
1333
intgen_t state = 1 << MAP_NDR_CHANGE;
1337
nextino = inomap_iter(contextp, state);
1338
} while (nextino <= lastino);
1344
inomap_next_dir(void *contextp, xfs_ino_t lastino)
1346
intgen_t state = (1 << MAP_DIR_CHANGE) | (1 << MAP_DIR_SUPPRT);
1350
nextino = inomap_iter(contextp, state);
1351
} while (nextino <= lastino);
1270
1356
static intgen_t
1271
1357
inomap_set_state( void *contextp, xfs_ino_t ino, intgen_t state )
1438
inomap_iter_cb( void *contextp,
1440
bool_t ( *funcp )( void *contextp,
1446
ASSERT( ! ( statemask & ( 1 << MAP_INO_UNUSED )));
1448
for ( hnkp = roothnkp ; hnkp != 0 ; hnkp = hnkp->nextp ) {
1449
seg_t *segp = hnkp->seg;
1450
seg_t *endsegp = hnkp->seg + SEGPERHNK;
1451
for ( ; segp < endsegp ; segp++ ) {
1455
if ( hnkp == tailhnkp && segp > lastsegp ) {
1459
endino = segp->base + INOPERSEG;
1460
for ( ; ino < endino ; ino++ ) {
1462
st = SEG_GET_BITS( segp, ino );
1463
if ( statemask & ( 1 << st )) {
1465
ok = ( * funcp )( contextp, ino, st );
1474
/* should not get here
1479
#endif /* NOTUSED */
1481
1522
static intgen_t
1482
1523
subtreelist_parse( jdm_fshandle_t *fshandlep,
1718
1759
if (hsm_fs_ctxtp) {
1721
if (HsmEstimateFileSpace(hsm_fs_ctxtp, statp, &bytes))
1763
/* if -z or multiple streams are being used,
1764
* we need an accurate estimate. otherwise a
1765
* quick estimate will do.
1767
accurate = maxdumpfilesize || drivecnt > 1;
1769
if (HsmEstimateFileSpace(hsm_fs_ctxtp, NULL, statp, &bytes, accurate))
1724
1772
return statp->bs_blocks * ( off64_t )statp->bs_blksize;