static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
			    gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
				NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
			      NULL, cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				mask);
}

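/*
 * (Illustration only, not from the original file: a caller that already
 * holds the range locked can reuse a single cached extent_state so the
 * set-uptodate and unlock steps don't each have to search the tree again.)
 */
static void example_mark_range_uptodate(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	set_extent_uptodate(tree, start, end, &cached, GFP_NOFS);
	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
}
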
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)

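/*
 * (The body of set_range_writeback is not part of this excerpt.  A plausible
 * sketch, assuming it mirrors set_range_dirty() above but tags each page
 * with set_page_writeback(); the real helper may differ in detail.)
 */
static int example_set_range_writeback(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}
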
/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 */
static void end_bio_extent_preparewrite(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

static struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		gfp_t gfp_flags)

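/*
 * (The allocator body is not included above.  A minimal sketch of what such
 * a helper typically does: allocate a bio, retry with fewer vecs under
 * memory pressure, then point it at the device and starting sector.  The
 * details here are assumptions, not the verbatim btrfs implementation.)
 */
static struct bio *example_bio_alloc(struct block_device *bdev,
				     u64 first_sector, int nr_vecs,
				     gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);
	while (!bio && (nr_vecs /= 2))
		bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}
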
	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			struct extent_state *cached = NULL;

			iosize = PAGE_CACHE_SIZE - pg_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + pg_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    &cached, GFP_NOFS);
			unlock_extent_cached(tree, cur, cur + iosize - 1,
					     &cached, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, pg_offset, cur,
				end - cur + 1, 0);
		if (IS_ERR_OR_NULL(em)) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			struct extent_state *cached = NULL;

			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + pg_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    &cached, GFP_NOFS);
			unlock_extent_cached(tree, cur, cur + iosize - 1,
					     &cached, GFP_NOFS);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_io_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}

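/*
 * (Illustration only, not from the original file: a minimal commit_write
 * style caller, assuming the page was prepared over the same range with
 * extent_prepare_write() below.)
 */
static int example_commit_write(struct inode *inode, struct page *page,
				unsigned from, unsigned to)
{
	return extent_commit_write(&BTRFS_I(inode)->io_tree, inode, page,
				   from, to);
}
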
int extent_prepare_write(struct extent_io_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end - block_start + 1, 1);
		if (IS_ERR(em) || !em)
			goto err;

		cur_end = min(block_end, extent_map_end(em) - 1);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if ((em->block_start != EXTENT_MAP_HOLE &&
		     em->block_start != EXTENT_MAP_INLINE) &&
		    !isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1, NULL)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;

			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
					 sector, iosize, page_offset, em->bdev,
					 NULL, 1,
					 end_bio_extent_preparewrite, 0,
					 0, 0);
			if (ret && !err)
				err = ret;
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}

/*
 * a helper for releasepage, this tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */

	return try_release_extent_state(map, tree, page, mask);
}

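/*
 * (Simplified sketch, not the original body: the idea described above is to
 * refuse to drop state while any part of the range is still locked or busy,
 * and otherwise clear the remaining bits so the page can be released.  The
 * exact flag choices here are illustrative.)
 */
static int example_try_release_state(struct extent_io_tree *tree,
				     struct page *page, gfp_t mask)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	/* still locked or under IO: not safe to drop anything */
	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
		return 0;

	/* safe: clear everything except the locked and nodatasum bits */
	clear_extent_bit(tree, start, end,
			 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
			 0, 0, NULL, mask);
	return 1;
}
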
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	struct extent_state *cached_state = NULL;
	u64 start = iblock << inode->i_blkbits;
	sector_t sector = 0;
	size_t blksize = (1 << inode->i_blkbits);
	struct extent_map *em;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
			 0, &cached_state, GFP_NOFS);
	em = get_extent(inode, NULL, 0, start, blksize, 0);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
			     start + blksize - 1, &cached_state, GFP_NOFS);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start > EXTENT_MAP_LAST_BYTE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

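/*
 * (Usage sketch, assuming the usual wiring: a filesystem's address_space
 * ->bmap operation simply forwards to extent_bmap() with its get_extent
 * callback, e.g. btrfs_get_extent for btrfs.)
 */
static sector_t example_bmap(struct address_space *mapping, sector_t iblock)
{
	return extent_bmap(mapping, iblock, btrfs_get_extent);
}
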
/*
 * helper function for fiemap, which doesn't want to see any holes.
 * This maps until we find something past 'last'
 */
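
/*
 * (The helper itself is not part of this excerpt.  A simplified sketch of
 * the idea described above: keep calling get_extent from 'offset' until the
 * returned mapping is not a hole, or we have moved past 'last'.  Names and
 * details here are illustrative.)
 */
static struct extent_map *example_skip_holes(struct inode *inode, u64 offset,
					     u64 last,
					     get_extent_t *get_extent)
{
	struct extent_map *em;

	while (offset < last) {
		em = get_extent(inode, NULL, 0, offset, last - offset, 0);
		if (!em || IS_ERR(em))
			return em;

		/* anything but a hole is what fiemap wants to see */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;

		/* hole: advance past it and try again */
		offset = extent_map_end(em);
		free_extent_map(em);
	}
	return NULL;
}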