~ubuntu-branches/debian/jessie/btrfs-tools/jessie

« back to all changes in this revision

Viewing changes to chunk-recover.c

  • Committer: Package Import Robot
  • Author(s): Dimitri John Ledkov
  • Date: 2014-04-19 12:12:11 UTC
  • mfrom: (1.2.12) (6.1.37 sid)
  • Revision ID: package-import@ubuntu.com-20140419121211-mski0g757tsdv4x1
Tags: 3.14.1-1
* New upstream release.
* Switch to git-dpm.
* Rebase and cleanup patches.

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
1
/*
2
 
 * Copyright (C) 2013 Fujitsu.  All rights reserved.
 
2
 * Copyright (C) 2013 FUJITSU LIMITED.  All rights reserved.
3
3
 *
4
4
 * This program is free software; you can redistribute it and/or
5
5
 * modify it under the terms of the GNU General Public
26
26
#include <fcntl.h>
27
27
#include <unistd.h>
28
28
#include <uuid/uuid.h>
 
29
#include <pthread.h>
29
30
 
30
31
#include "kerncompat.h"
31
32
#include "list.h"
41
42
#include "btrfsck.h"
42
43
#include "commands.h"
43
44
 
44
 
#define BTRFS_STRIPE_LEN                        (64 * 1024)
45
45
#define BTRFS_NUM_MIRRORS                       2
46
46
 
47
47
struct recover_control {
64
64
        struct list_head good_chunks;
65
65
        struct list_head bad_chunks;
66
66
        struct list_head unrepaired_chunks;
 
67
        pthread_mutex_t rc_lock;
67
68
};
68
69
 
69
70
struct extent_record {
75
76
        int nmirrors;
76
77
};
77
78
 
 
79
struct device_scan {
 
80
        struct recover_control *rc;
 
81
        struct btrfs_device *dev;
 
82
        int fd;
 
83
};
 
84
 
78
85
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
79
86
{
80
87
        struct extent_record *rec;
202
209
 
203
210
        rc->verbose = verbose;
204
211
        rc->yes = yes;
 
212
        pthread_mutex_init(&rc->rc_lock, NULL);
205
213
}
206
214
 
207
215
static void free_recover_control(struct recover_control *rc)
210
218
        free_chunk_cache_tree(&rc->chunk);
211
219
        free_device_extent_tree(&rc->devext);
212
220
        free_extent_record_tree(&rc->eb_cache);
 
221
        pthread_mutex_destroy(&rc->rc_lock);
213
222
}
214
223
 
215
224
static int process_block_group_item(struct block_group_tree *bg_cache,
694
703
                btrfs_item_key_to_cpu(leaf, &key, i);
695
704
                switch (key.type) {
696
705
                case BTRFS_BLOCK_GROUP_ITEM_KEY:
 
706
                        pthread_mutex_lock(&rc->rc_lock);
697
707
                        ret = process_block_group_item(&rc->bg, leaf, &key, i);
 
708
                        pthread_mutex_unlock(&rc->rc_lock);
698
709
                        break;
699
710
                case BTRFS_CHUNK_ITEM_KEY:
 
711
                        pthread_mutex_lock(&rc->rc_lock);
700
712
                        ret = process_chunk_item(&rc->chunk, leaf, &key, i);
 
713
                        pthread_mutex_unlock(&rc->rc_lock);
701
714
                        break;
702
715
                case BTRFS_DEV_EXTENT_KEY:
 
716
                        pthread_mutex_lock(&rc->rc_lock);
703
717
                        ret = process_device_extent_item(&rc->devext, leaf,
704
718
                                                         &key, i);
 
719
                        pthread_mutex_unlock(&rc->rc_lock);
705
720
                        break;
706
721
                }
707
722
                if (ret)
721
736
        return 0;
722
737
}
723
738
 
724
 
static int scan_one_device(struct recover_control *rc, int fd,
725
 
                           struct btrfs_device *device)
 
739
static int scan_one_device(void *dev_scan_struct)
726
740
{
727
741
        struct extent_buffer *buf;
728
742
        u64 bytenr;
729
743
        int ret = 0;
 
744
        struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
 
745
        struct recover_control *rc = dev_scan->rc;
 
746
        struct btrfs_device *device = dev_scan->dev;
 
747
        int fd = dev_scan->fd;
 
748
 
 
749
        ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
 
750
        if (ret)
 
751
                return 1;
730
752
 
731
753
        buf = malloc(sizeof(*buf) + rc->leafsize);
732
754
        if (!buf)
754
776
                        continue;
755
777
                }
756
778
 
 
779
                pthread_mutex_lock(&rc->rc_lock);
757
780
                ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
 
781
                pthread_mutex_unlock(&rc->rc_lock);
758
782
                if (ret)
759
783
                        goto out;
760
784
 
784
808
                bytenr += rc->leafsize;
785
809
        }
786
810
out:
 
811
        close(fd);
787
812
        free(buf);
788
813
        return ret;
789
814
}
793
818
        int ret = 0;
794
819
        int fd;
795
820
        struct btrfs_device *dev;
 
821
        struct device_scan *dev_scans;
 
822
        pthread_t *t_scans;
 
823
        int *t_rets;
 
824
        int devnr = 0;
 
825
        int devidx = 0;
 
826
        int cancel_from = 0;
 
827
        int cancel_to = 0;
 
828
        int i;
 
829
 
 
830
        list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
 
831
                devnr++;
 
832
        dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
 
833
                                                 * devnr);
 
834
        if (!dev_scans)
 
835
                return -ENOMEM;
 
836
        t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
 
837
        if (!t_scans)
 
838
                return -ENOMEM;
 
839
        t_rets = (int *)malloc(sizeof(int) * devnr);
 
840
        if (!t_rets)
 
841
                return -ENOMEM;
796
842
 
797
843
        list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
798
844
                fd = open(dev->name, O_RDONLY);
801
847
                                dev->name);
802
848
                        return -1;
803
849
                }
804
 
                ret = scan_one_device(rc, fd, dev);
805
 
                close(fd);
806
 
                if (ret)
807
 
                        return ret;
808
 
        }
809
 
        return ret;
 
850
                dev_scans[devidx].rc = rc;
 
851
                dev_scans[devidx].dev = dev;
 
852
                dev_scans[devidx].fd = fd;
 
853
                ret = pthread_create(&t_scans[devidx], NULL,
 
854
                                     (void *)scan_one_device,
 
855
                                     (void *)&dev_scans[devidx]);
 
856
                if (ret) {
 
857
                        cancel_from = 0;
 
858
                        cancel_to = devidx - 1;
 
859
                        goto out;
 
860
                }
 
861
                devidx++;
 
862
        }
 
863
 
 
864
        i = 0;
 
865
        while (i < devidx) {
 
866
                ret = pthread_join(t_scans[i], (void **)&t_rets[i]);
 
867
                if (ret || t_rets[i]) {
 
868
                        ret = 1;
 
869
                        cancel_from = i + 1;
 
870
                        cancel_to = devnr - 1;
 
871
                        break;
 
872
                }
 
873
                i++;
 
874
        }
 
875
out:
 
876
        while (cancel_from <= cancel_to) {
 
877
                pthread_cancel(t_scans[cancel_from]);
 
878
                cancel_from++;
 
879
        }
 
880
        free(dev_scans);
 
881
        free(t_scans);
 
882
        free(t_rets);
 
883
        return !!ret;
810
884
}
811
885
 
812
886
static int build_device_map_by_chunk_record(struct btrfs_root *root,
1034
1108
        disk_key.type = BTRFS_DEV_ITEM_KEY;
1035
1109
        disk_key.offset = min_devid;
1036
1110
 
1037
 
        cow = btrfs_alloc_free_block(trans, root, root->sectorsize,
 
1111
        cow = btrfs_alloc_free_block(trans, root, root->nodesize,
1038
1112
                                     BTRFS_CHUNK_TREE_OBJECTID,
1039
1113
                                     &disk_key, 0, 0, 0);
1040
1114
        btrfs_set_header_bytenr(cow, cow->start);
1047
1121
                        btrfs_header_fsid(), BTRFS_FSID_SIZE);
1048
1122
 
1049
1123
        write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
1050
 
                        (unsigned long)btrfs_header_chunk_tree_uuid(cow),
 
1124
                        btrfs_header_chunk_tree_uuid(cow),
1051
1125
                        BTRFS_UUID_SIZE);
1052
1126
 
1053
1127
        root->node = cow;
1197
1271
                fprintf(stderr, "Failed to allocate memory for fs_info\n");
1198
1272
                return ERR_PTR(-ENOMEM);
1199
1273
        }
 
1274
        fs_info->is_chunk_recover = 1;
1200
1275
 
1201
1276
        fs_info->fs_devices = rc->fs_devices;
1202
1277
        ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
1235
1310
 
1236
1311
        eb = fs_info->tree_root->node;
1237
1312
        read_extent_buffer(eb, fs_info->chunk_tree_uuid,
1238
 
                           (unsigned long)btrfs_header_chunk_tree_uuid(eb),
 
1313
                           btrfs_header_chunk_tree_uuid(eb),
1239
1314
                           BTRFS_UUID_SIZE);
1240
1315
 
1241
1316
        return fs_info->fs_root;
1565
1640
        return ret;
1566
1641
}
1567
1642
 
 
1643
/*
 * Fetch the checksum at *csum_offset inside the csum item at (*leaf, *slot),
 * advancing the cursor to the next item/leaf when the current item is
 * exhausted.
 *
 * Cursor state (all in/out): *leaf, *slot, *csum_offset, *key.
 * The fetched on-disk checksum is copied into *tree_csum.
 *
 * Returns:
 *   0  - checksum fetched into *tree_csum
 *   1  - ran past the last leaf of the csum tree (btrfs_next_leaf > 0)
 *   2  - next checksum would lie at or beyond @end; caller moves on
 *  -1  - tree walk error
 */
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	/* One csum item holds checksums for several consecutive blocks. */
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		/* Current item exhausted: step to the next slot/leaf. */
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	/* key->offset is the logical address the item's first csum covers. */
	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	/* Index into the packed checksum array within the item. */
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
					     + (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			  (unsigned long)csum_item, csum_size);
	return ret;
}
 
1683
 
 
1684
static u64 calc_data_offset(struct btrfs_key *key,
 
1685
                            struct chunk_record *chunk,
 
1686
                            u64 dev_offset,
 
1687
                            u64 csum_offset,
 
1688
                            u32 blocksize)
 
1689
{
 
1690
        u64 data_offset;
 
1691
        int logical_stripe_nr;
 
1692
        int dev_stripe_nr;
 
1693
        int nr_data_stripes;
 
1694
 
 
1695
        data_offset = key->offset + csum_offset * blocksize - chunk->offset;
 
1696
        nr_data_stripes = chunk->num_stripes;
 
1697
 
 
1698
        if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
 
1699
                nr_data_stripes -= 1;
 
1700
        else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
 
1701
                nr_data_stripes -= 2;
 
1702
 
 
1703
        logical_stripe_nr = data_offset / chunk->stripe_len;
 
1704
        dev_stripe_nr = logical_stripe_nr / nr_data_stripes;
 
1705
 
 
1706
        data_offset -= logical_stripe_nr * chunk->stripe_len;
 
1707
        data_offset += dev_stripe_nr * chunk->stripe_len;
 
1708
 
 
1709
        return dev_offset + data_offset;
 
1710
}
 
1711
 
 
1712
/*
 * Read @len bytes at @start from @fd, checksum them, and compare against the
 * checksum stored in the csum tree.
 *
 * Returns 0 on match, 1 on mismatch, -1 on allocation or read failure.
 */
static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	u32 csum = ~(u32)0;
	char *buf;
	int ret;

	buf = malloc(len);
	if (!buf)
		return -1;

	ret = pread64(fd, buf, len, start);
	/* Short reads are treated as failure too. */
	if (ret < 0 || ret != len) {
		free(buf);
		return -1;
	}

	csum = btrfs_csum_data(NULL, buf, csum, len);
	btrfs_csum_final(csum, (char *)&csum);
	ret = (csum != tree_csum) ? 1 : 0;

	free(buf);
	return ret;
}
 
1735
 
 
1736
static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
 
1737
                           struct extent_buffer *leaf, int slot) {
 
1738
        u32 blocksize = root->sectorsize;
 
1739
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 
1740
 
 
1741
        u64 offset = btrfs_item_size_nr(leaf, slot);
 
1742
        offset /= csum_size;
 
1743
        offset *= blocksize;
 
1744
        offset += key->offset;
 
1745
 
 
1746
        return offset;
 
1747
}
 
1748
 
 
1749
static int insert_stripe(struct list_head *devexts,
 
1750
                         struct recover_control *rc,
 
1751
                         struct chunk_record *chunk,
 
1752
                         int index) {
 
1753
        struct device_extent_record *devext;
 
1754
        struct btrfs_device *dev;
 
1755
 
 
1756
        devext = list_entry(devexts->next, struct device_extent_record,
 
1757
                            chunk_list);
 
1758
        dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
 
1759
                                        0);
 
1760
        if (!dev)
 
1761
                return 1;
 
1762
        BUG_ON(btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
 
1763
                                        1));
 
1764
 
 
1765
        chunk->stripes[index].devid = devext->objectid;
 
1766
        chunk->stripes[index].offset = devext->offset;
 
1767
        memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);
 
1768
 
 
1769
        list_move(&devext->chunk_list, &chunk->dextents);
 
1770
 
 
1771
        return 0;
 
1772
}
 
1773
 
 
1774
#define EQUAL_STRIPE (1 << 0)
 
1775
 
 
1776
/*
 * Recover the stripe ordering of a RAID5/6 data chunk by checksum probing.
 *
 * The chunk's device extents start on @candidates with unknown order.  For
 * each stripe-sized logical window [start, end) we fetch on-disk checksums
 * from the csum tree and test them against the data each candidate device
 * holds at the corresponding physical offset.  Devices whose data mismatches
 * are parked on @unordered; when exactly one candidate survives a window its
 * stripe index is fixed via insert_stripe().  Leftover positions are filled
 * from the parity-count heuristics at out:.
 *
 * Returns 0 when every stripe slot was placed, non-zero otherwise; on any
 * return all device extents are spliced back onto chunk->dextents.
 */
static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;
	u64 start = chunk->offset;		/* current stripe window start */
	u64 end = start + chunk->stripe_len;	/* current stripe window end */
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;	/* csum index within the current csum item */
	u64 data_offset;
	u32 blocksize = root->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);	/* candidates whose data mismatched */
	LIST_HEAD(candidates);	/* devices still possible for this stripe */

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	/* A single remaining extent needs no probing for this window. */
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		/* No exact match: locate the item covering @start, if any. */
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				/* Past the last leaf; try the last item. */
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
								> start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto next_stripe;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			/* No previous csum item at all. */
			if (key.offset >= end)
				goto next_stripe;
			else
				goto next_csum;
		}
		leaf = path.nodes[0];
		slot = path.slots[0];

		/* Does the previous item's range reach into our window? */
		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		} else {
			if (key.offset >= end)
				goto next_stripe;
		}

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto out;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		/*
		 * Csum tree exhausted while several candidates remain:
		 * their data is indistinguishable ("equal stripes").
		 * NOTE(review): num_unordered counted here is counted again
		 * over the same list at out: after the splice — looks like a
		 * double count; confirm against upstream before relying on it.
		 */
		list_for_each_entry(devext, &unordered, chunk_list)
			num_unordered++;
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	/* Probe every remaining candidate against this one checksum. */
	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			/* Mismatch: not this stripe; park it. */
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		list_for_each_entry(devext, &unordered, chunk_list)
			num_unordered++;
		/*
		 * RAID6 with two leftovers is acceptable: both are parity
		 * (P/Q) for this window, so no data stripe was lost.
		 */
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
					&& num_unordered == 2) {
			list_splice_init(&unordered, &chunk->dextents);
			btrfs_release_path(&path);
			return 0;
		} else
			ret = 1;

		goto fail_out;
	}

	if (list_is_last(candidates.next, &candidates)) {
		/* Exactly one survivor: it owns this stripe slot. */
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		/* Still ambiguous: try the next checksum in this window. */
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	/* Advance to the next stripe window and re-pool the candidates. */
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	ret = 0;
	list_splice_init(&candidates, &unordered);
	list_for_each_entry(devext, &unordered, chunk_list)
		num_unordered++;
	if (num_unordered == 1) {
		/* One unplaced extent: it takes the one empty slot. */
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		/*
		 * The parity stripes (one for RAID5, two for RAID6) never
		 * match data checksums, so that many leftovers are expected;
		 * drop them into the remaining empty slots in order.
		 */
		if ((num_unordered == 2 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID5)
		 || (num_unordered == 3 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID6)) {
			for (i = 0; i < chunk->num_stripes; i++) {
				if (!chunk->stripes[i].devid) {
					ret = insert_stripe(&unordered, rc,
							chunk, i);
					if (ret)
						break;
				}
			}
		}
	}
fail_out:
	/* Anything still on @unordered means the rebuild is incomplete. */
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
 
1977
 
 
1978
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
 
1979
                                           struct btrfs_root *root)
 
1980
{
 
1981
        struct chunk_record *chunk;
 
1982
        struct chunk_record *next;
 
1983
        int ret = 0;
 
1984
        int err;
 
1985
        u8 flags;
 
1986
 
 
1987
        list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
 
1988
                if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
 
1989
                 && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
 
1990
                        flags = 0;
 
1991
                        err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
 
1992
                                                              &flags);
 
1993
                        if (err) {
 
1994
                                list_move(&chunk->list, &rc->bad_chunks);
 
1995
                                if (flags & EQUAL_STRIPE)
 
1996
                                        fprintf(stderr,
 
1997
                        "Failure: too many equal stripes in chunk[%llu %llu]\n",
 
1998
                                                chunk->offset, chunk->length);
 
1999
                                if (!ret)
 
2000
                                        ret = err;
 
2001
                        } else
 
2002
                                list_move(&chunk->list, &rc->good_chunks);
 
2003
                }
 
2004
        }
 
2005
        return ret;
 
2006
}
 
2007
 
1568
2008
static int btrfs_recover_chunks(struct recover_control *rc)
1569
2009
{
1570
2010
        struct chunk_record *chunk;
1684
2124
                 * droppped from the fs. Don't deal with them now, we will
1685
2125
                 * check it after the fs is opened.
1686
2126
                 */
 
2127
        } else {
 
2128
                fprintf(stderr, "Check chunks successfully with no orphans\n");
 
2129
                goto fail_rc;
1687
2130
        }
1688
2131
 
1689
2132
        root = open_ctree_with_broken_chunk(&rc);
1699
2142
                goto fail_close_ctree;
1700
2143
        }
1701
2144
 
 
2145
        ret = btrfs_rebuild_ordered_data_chunk_stripes(&rc, root);
 
2146
        if (ret) {
 
2147
                fprintf(stderr, "Failed to rebuild ordered chunk stripes.\n");
 
2148
                goto fail_close_ctree;
 
2149
        }
 
2150
 
1702
2151
        if (!rc.yes) {
1703
2152
                ret = ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
1704
2153
                if (!ret) {