struct physical_volume *pv;
	LV_TYPE_EXTERNAL_ORIGIN,
	LV_TYPE_THIN_SNAPSHOT,
	LV_TYPE_THIN_POOL_DATA,
	LV_TYPE_THIN_POOL_METADATA,
	LV_TYPE_CACHE_POOL_DATA,
	LV_TYPE_CACHE_POOL_METADATA,
	LV_TYPE_POOL_METADATA_SPARE,
	LV_TYPE_RAID_METADATA,
	LV_TYPE_MIRROR_IMAGE,
static const char *_lv_type_names[] = {
	[LV_TYPE_UNKNOWN] = "unknown",
	[LV_TYPE_PVMOVE] = "pvmove",
	[LV_TYPE_ORIGIN] = "origin",
	[LV_TYPE_EXTERNAL_ORIGIN] = "external-origin",
	[LV_TYPE_SNAPSHOT] = "snapshot",
	[LV_TYPE_THIN] = "thin",
	[LV_TYPE_THIN_SNAPSHOT] = "thin-snapshot",
	[LV_TYPE_THIN_POOL] = "thin-pool",
	[LV_TYPE_THIN_POOL_DATA] = "thin-pool-data",
	[LV_TYPE_THIN_POOL_METADATA] = "thin-pool-metadata",
	[LV_TYPE_CACHE] = "cache",
	[LV_TYPE_CACHE_POOL] = "cache-pool",
	[LV_TYPE_CACHE_POOL_DATA] = "cache-pool-data",
	[LV_TYPE_CACHE_POOL_METADATA] = "cache-pool-metadata",
	[LV_TYPE_POOL_METADATA_SPARE] = "pool-metadata-spare",
	[LV_TYPE_VIRTUAL] = "virtual",
	[LV_TYPE_RAID] = "raid",
	[LV_TYPE_RAID_IMAGE] = "raid-image",
	[LV_TYPE_RAID_METADATA] = "raid-metadata",
	[LV_TYPE_MIRROR] = "mirror",
	[LV_TYPE_MIRROR_IMAGE] = "mirror-image",
	[LV_TYPE_MIRROR_LOG] = "mirror-log",
	[LV_TYPE_LINEAR] = "linear",
	[LV_TYPE_STRIPED] = "striped"
static lv_type_t _get_lv_type(const struct logical_volume *lv)
	lv_type_t type = LV_TYPE_UNKNOWN;
	struct lv_segment *seg;

	if (lv->status & PVMOVE)
		type = LV_TYPE_PVMOVE;
	else if (lv_is_origin(lv))
		type = LV_TYPE_ORIGIN;
	else if (lv_is_external_origin(lv))
		type = LV_TYPE_EXTERNAL_ORIGIN;
	else if (lv_is_cow(lv))
		type = LV_TYPE_SNAPSHOT;
	else if (lv_is_thin_volume(lv))
		type = first_seg(lv)->origin ? LV_TYPE_THIN_SNAPSHOT : LV_TYPE_THIN;
	else if (lv_is_thin_pool(lv))
		type = LV_TYPE_THIN_POOL;
	else if (lv_is_thin_pool_data(lv))
		type = LV_TYPE_THIN_POOL_DATA;
	else if (lv_is_thin_pool_metadata(lv))
		type = LV_TYPE_THIN_POOL_METADATA;
	else if (lv_is_pool_metadata_spare(lv))
		type = LV_TYPE_POOL_METADATA_SPARE;
	else if (lv_is_cache(lv))
		type = LV_TYPE_CACHE;
	else if (lv_is_cache_pool(lv))
		type = LV_TYPE_CACHE_POOL;
	else if (lv_is_cache_pool_data(lv))
		type = LV_TYPE_CACHE_POOL_DATA;
	else if (lv_is_cache_pool_metadata(lv))
		type = LV_TYPE_CACHE_POOL_METADATA;
	else if (lv_is_virtual(lv))
		type = LV_TYPE_VIRTUAL;
	else if (lv_is_raid(lv))
		type = LV_TYPE_RAID;
	else if (lv_is_raid_image(lv))
		type = LV_TYPE_RAID_IMAGE;
	else if (lv_is_raid_metadata(lv))
		type = LV_TYPE_RAID_METADATA;
	else if (lv_is_mirrored(lv))
		type = LV_TYPE_MIRROR;
	else if (lv_is_mirror_image(lv))
		type = LV_TYPE_MIRROR_IMAGE;
	else if (lv_is_mirror_log(lv))
		type = LV_TYPE_MIRROR_LOG;
	/* none of the above, check linear... */
	if (type == LV_TYPE_UNKNOWN) {
		type = LV_TYPE_LINEAR;
		dm_list_iterate_items(seg, &lv->segments) {
			if (!seg_is_linear(seg)) {
				type = LV_TYPE_UNKNOWN;
				break;
			}
		}
	}

	/* ...if not even linear, check striped... */
	if (type == LV_TYPE_UNKNOWN) {
		type = LV_TYPE_STRIPED;
		dm_list_iterate_items(seg, &lv->segments) {
			if (!seg_is_striped(seg)) {
				type = LV_TYPE_UNKNOWN;
				break;
			}
		}
	}
const char *lv_type_name(const struct logical_volume *lv) {
	lv_type_t type = _get_lv_type(lv);
	return _lv_type_names[type];
}

int lv_is_linear(const struct logical_volume *lv)
{
	lv_type_t type = _get_lv_type(lv);
	return type == LV_TYPE_LINEAR;
}

int lv_is_striped(const struct logical_volume *lv)
{
	lv_type_t type = _get_lv_type(lv);
	return type == LV_TYPE_STRIPED;
}
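/*
 * Sketch (not from the original source): one possible caller of the
 * helpers above; _print_lv_type() is a made-up name for illustration.
 */
static void _print_lv_type(const struct logical_volume *lv)
{
	/* lv_type_name() maps unknown types to "unknown", never NULL */
	log_print("LV %s has type %s.", lv->name, lv_type_name(lv));
}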
static int _lv_is_on_pv(struct logical_volume *lv, void *data)
{
	int *is_on_pv = ((struct pv_and_int *)data)->i;
	 * areas if the number of areas matches.
	 */
	if (alloc_parms->prev_lvseg &&
	    ((ah->area_count + ah->parity_count) == prev_lvseg->area_count)) {
		alloc_parms->flags |= A_AREA_COUNT_MATCHES;

		/* Are there any preceding segments we must follow on from? */
		if (alloc_parms->alloc == ALLOC_CONTIGUOUS) {
			alloc_parms->flags |= A_CONTIGUOUS_TO_LVSEG;
			alloc_parms->flags |= A_POSITIONAL_FILL;
		} else if ((alloc_parms->alloc == ALLOC_CLING) ||
			   (alloc_parms->alloc == ALLOC_CLING_BY_TAGS)) {
			alloc_parms->flags |= A_CLING_TO_LVSEG;
			alloc_parms->flags |= A_POSITIONAL_FILL;
		}
	}
	/*
	 * A cling allocation that follows a successful contiguous
	 * allocation must use the same PVs (or else fail).
	 */
	if ((alloc_parms->alloc == ALLOC_CLING) ||
	    (alloc_parms->alloc == ALLOC_CLING_BY_TAGS)) {
		alloc_parms->flags |= A_CLING_TO_ALLOCED;
		alloc_parms->flags |= A_POSITIONAL_FILL;
	}

	if (alloc_parms->alloc == ALLOC_CLING_BY_TAGS)
		alloc_parms->flags |= A_CLING_BY_TAGS;
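	/*
	 * Summary of the policy-to-flag mapping above (illustrative):
	 *
	 *   ALLOC_CONTIGUOUS     -> A_CONTIGUOUS_TO_LVSEG | A_POSITIONAL_FILL
	 *   ALLOC_CLING          -> A_CLING_TO_LVSEG | A_POSITIONAL_FILL
	 *   ALLOC_CLING_BY_TAGS  -> A_CLING_TO_LVSEG | A_POSITIONAL_FILL |
	 *                           A_CLING_BY_TAGS
	 *
	 * A cling retry after a successful contiguous allocation also
	 * carries A_CLING_TO_ALLOCED so it stays on the PVs already chosen.
	 */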
	 * allocation, we store the images at the beginning
	 * of the areas array and the metadata at the end.
	 */
	smeta = s + ah->area_count + ah->parity_count;
	aa[smeta].pv = pva->map->pv;
	aa[smeta].pe = pva->start;
	aa[smeta].len = ah->log_len;

	log_debug_alloc("Allocating parallel metadata area %" PRIu32
			" on %s start PE %" PRIu32
			" length %" PRIu32 ".",
			(smeta - (ah->area_count + ah->parity_count)),
			pv_dev_name(aa[smeta].pv), aa[smeta].pe,
			ah->log_len);

	consume_pv_area(pva, ah->log_len);
	dm_list_add(&ah->alloced_areas[smeta], &aa[smeta].list);
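	/*
	 * Worked example of the layout described above (values illustrative):
	 * with area_count = 2 and parity_count = 1, the aa[] array holds
	 *
	 *   aa[0] aa[1] aa[2]   image/data areas   (index s)
	 *   aa[3] aa[4] aa[5]   metadata areas     (index smeta = s + 3)
	 *
	 * so each metadata area lives area_count + parity_count slots past
	 * the data area it belongs to.
	 */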
aa[s].len = (ah->alloc_and_split_meta && !ah->split_metadata_is_allocated) ? len - ah->log_len : len;
/* Skip empty allocations */
static void _reserve_area(struct alloc_state *alloc_state, struct pv_area *pva, uint32_t required,
			  uint32_t ix_pva, uint32_t unreserved)
{
	struct pv_area_used *area_used = &alloc_state->areas[ix_pva];

	log_debug_alloc("%s allocation area %" PRIu32 " %s %s start PE %" PRIu32
			" length %" PRIu32 " leaving %" PRIu32 ".",
			area_used->pva ? "Changing " : "Considering",
			ix_pva, area_used->pva ? "to" : "as",
			dev_name(pva->map->pv->dev), pva->start, required, unreserved);

	area_used->pva = pva;
	area_used->used = required;
}
static int _reserve_required_area(struct alloc_state *alloc_state, struct pv_area *pva, uint32_t required,
				  uint32_t ix_pva, uint32_t unreserved)
{
	uint32_t s;

	/* Expand areas array if needed after an area was split. */
	if (ix_pva >= alloc_state->areas_size) {
		alloc_state->areas_size *= 2;
		if (!(alloc_state->areas = dm_realloc(alloc_state->areas, sizeof(*alloc_state->areas) * (alloc_state->areas_size)))) {
			log_error("Memory reallocation for parallel areas failed.");
			return 0;
		}
		for (s = alloc_state->areas_size / 2; s < alloc_state->areas_size; s++)
			alloc_state->areas[s].pva = NULL;
	}

	_reserve_area(alloc_state, pva, required, ix_pva, unreserved);

	return 1;
}
static int _is_condition(struct cmd_context *cmd __attribute__((unused)),
			 struct pv_segment *pvseg, uint32_t s,
			 void *data)
{
	struct pv_match *pvmatch = data;
	int positional = pvmatch->alloc_state->alloc_parms->flags & A_POSITIONAL_FILL;

	if (positional && pvmatch->alloc_state->areas[s].pva)
		return 1;	/* Area already assigned */

	if (!pvmatch->condition(pvmatch, pvseg, pvmatch->pva))
		return 1;	/* Continue */

	if (positional && (s >= pvmatch->alloc_state->areas_size))
		return 1;

	/*
	 * Only used for cling and contiguous policies (which only make one allocation per PV)
	 * so it's safe to say all the available space is used.
	 */
	_reserve_required_area(pvmatch->alloc_state, pvmatch->pva, pvmatch->pva->count, s, 0);

	return 2;	/* Finished */
}
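/*
 * Note on the return codes above: this callback follows the pv_segment
 * iterator convention used elsewhere in this file - 1 means "keep
 * scanning", 2 means "stop, a match was recorded" (see the Continue
 * and Finished comments).
 */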
	/* Cling to prev_lvseg? */
	if (((alloc_parms->flags & A_CLING_TO_LVSEG) ||
	     (ah->maximise_cling && (alloc_parms->flags & A_AREA_COUNT_MATCHES))) &&
	    _check_cling(ah, NULL, alloc_parms->prev_lvseg, pva, alloc_state))
		/* If this PV is suitable, use this first area */
		goto found;

	/* Cling_to_alloced? */
	if ((alloc_parms->flags & A_CLING_TO_ALLOCED) &&
	    _check_cling_to_alloced(ah, NULL, pva, alloc_state))
		goto found;

	/* Cling_by_tags? */
	if (!(alloc_parms->flags & A_CLING_BY_TAGS) || !ah->cling_tag_list_cn)
		return NEXT_PV;

	if ((alloc_parms->flags & A_AREA_COUNT_MATCHES)) {
		if (_check_cling(ah, ah->cling_tag_list_cn, alloc_parms->prev_lvseg, pva, alloc_state))
			goto found;
	} else if (_check_cling_to_alloced(ah, ah->cling_tag_list_cn, pva, alloc_state))
		goto found;

	/* All areas on this PV give same result so pointless checking more */
	return NEXT_PV;
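	/*
	 * NEXT_PV short-circuits the scan of the current PV: for cling and
	 * contiguous policies every area on a PV is judged the same way,
	 * so once one area fails there is no point testing the rest (see
	 * the comment above).
	 */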
	return required;
static void _clear_areas(struct alloc_state *alloc_state)
	struct alloced_area *aa;
	uint32_t devices_needed = ah->area_count + ah->parity_count;

	/* ix_offset holds the number of parallel allocations that must be contiguous/cling */
	/* At most one of A_CONTIGUOUS_TO_LVSEG, A_CLING_TO_LVSEG or A_CLING_TO_ALLOCED may be set */
	if (!(alloc_parms->flags & A_POSITIONAL_FILL))
		ix_offset = 0;
	else if (alloc_parms->flags & (A_CONTIGUOUS_TO_LVSEG | A_CLING_TO_LVSEG))
		ix_offset = _stripes_per_mimage(alloc_parms->prev_lvseg) * alloc_parms->prev_lvseg->area_count;
	else if (alloc_parms->flags & A_CLING_TO_ALLOCED)
		ix_offset = ah->area_count;

	if (alloc_parms->alloc == ALLOC_NORMAL || (alloc_parms->flags & A_CLING_TO_ALLOCED))
		log_debug_alloc("Cling_to_allocated is %sset",
				alloc_parms->flags & A_CLING_TO_ALLOCED ? "" : "not ");

	if (alloc_parms->flags & A_POSITIONAL_FILL)
		log_debug_alloc("%u preferred area(s) to be filled positionally.", ix_offset);
	else
		log_debug_alloc("Areas to be sorted and filled sequentially.");
	_clear_areas(alloc_state);
	_reset_unreserved(pvms);
	/*
	 * There are two types of allocations, which can't be mixed at present:
	 *
	 * PREFERRED are stored immediately in a specific parallel slot.
	 * This is only used if the A_POSITIONAL_FILL flag is set.
	 * This requires the number of slots to match, so if comparing with
	 * prev_lvseg then A_AREA_COUNT_MATCHES must be set.
	 *
	 * USE_AREA are stored for later, then sorted and chosen from.
	 */
	switch(_check_pva(ah, pva, max_to_allocate,
			  alloc_state, already_found_one, iteration_count, log_iteration_count)) {

	case PREFERRED:
/*
 * Add new areas to mirrored segments
 */
int lv_add_segmented_mirror_image(struct alloc_handle *ah,
				  struct logical_volume *lv, uint32_t le,
				  uint32_t region_size)
{
	struct alloced_area *aa;
	struct lv_segment *seg, *new_seg;
	uint32_t current_le = le;
	uint32_t s;
	struct segment_type *segtype;
	struct logical_volume *orig_lv, *copy_lv;
	char *image_name;

	if (!(lv->status & PVMOVE)) {
		log_error(INTERNAL_ERROR
			  "Non-pvmove LV, %s, passed as argument", lv->name);
		return 0;
	}

	if (seg_type(first_seg(lv), 0) != AREA_PV) {
		log_error(INTERNAL_ERROR
			  "Bad segment type for first segment area");
		return 0;
	}
	/*
	 * If the allocator provided two or more PV allocations for any
	 * single segment of the original LV, that LV segment must be
	 * split up to match.
	 */
	dm_list_iterate_items(aa, &ah->alloced_areas[0]) {
		if (!(seg = find_seg_by_le(lv, current_le))) {
			log_error("Failed to find segment for %s extent %"
				  PRIu32, lv->name, current_le);
			return 0;
		}

		/* Allocator assures aa[0].len <= seg->area_len */
		if (aa[0].len < seg->area_len) {
			if (!lv_split_segment(lv, seg->le + aa[0].len)) {
				log_error("Failed to split segment at %s "
					  "extent %" PRIu32, lv->name, le);
				return 0;
			}
		}

		current_le += seg->area_len;
	}
	if (!insert_layer_for_lv(lv->vg->cmd, lv, PVMOVE, "_mimage_0")) {
		log_error("Failed to build pvmove LV-type mirror, %s",
			  lv->name);
		return 0;
	}

	orig_lv = seg_lv(first_seg(lv), 0);
	if (!(image_name = dm_pool_strdup(lv->vg->vgmem, orig_lv->name)))
		return_0;

	image_name[strlen(image_name) - 1] = '1';
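	/*
	 * Example: if insert_layer_for_lv() left the original LV named
	 * "pvmove0_mimage_0", flipping the last character yields
	 * "pvmove0_mimage_1" for the copy created below ("pvmove0" is
	 * just an illustrative LV name).
	 */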
	if (!(copy_lv = lv_create_empty(image_name, NULL,
					ALLOC_INHERIT, lv->vg)))
		return_0;

	if (!lv_add_mirror_lvs(lv, &copy_lv, 1, MIRROR_IMAGE, region_size))
		return_0;
	if (!(segtype = get_segtype_from_string(lv->vg->cmd, "striped")))
		return_0;

	dm_list_iterate_items(aa, &ah->alloced_areas[0]) {
		if (!(seg = find_seg_by_le(orig_lv, current_le))) {
			log_error("Failed to find segment for %s extent %"
				  PRIu32, lv->name, current_le);
			return 0;
		}

		if (!(new_seg = alloc_lv_segment(segtype, copy_lv,
						 seg->le, seg->len, PVMOVE, 0,
						 NULL, NULL, 1, seg->len,
		for (s = 0; s < ah->area_count; s++) {
			if (!set_lv_segment_area_pv(new_seg, s,
						    aa[s].pv, aa[s].pe))
				return_0;
		}

		dm_list_add(&copy_lv->segments, &new_seg->list);

		current_le += seg->area_len;
		copy_lv->le_count += seg->area_len;
	}
	lv->status |= MIRRORED;

	/* FIXME: add log */

	if (lv->vg->fid->fmt->ops->lv_setup &&
	    !lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
		return_0;

	return 1;
}
/*
 * Add new areas to mirrored segments
 */
int lv_add_mirror_areas(struct alloc_handle *ah,
			struct logical_volume *lv, uint32_t le,
			uint32_t region_size)
{
static int _adjust_policy_params(struct cmd_context *cmd,
				 struct logical_volume *lv, struct lvresize_params *lp)
{
	dm_percent_t percent;
	int policy_threshold, policy_amount;

	if (lv_is_thin_pool(lv)) {
		policy_threshold =
			find_config_tree_int(cmd, activation_thin_pool_autoextend_threshold_CFG,
					     lv_config_profile(lv)) * DM_PERCENT_1;
		policy_amount =
			find_config_tree_int(cmd, activation_thin_pool_autoextend_percent_CFG,
					     lv_config_profile(lv));
		if (!policy_amount && policy_threshold < DM_PERCENT_100)
			return 0;
	} else {
		policy_threshold =
			find_config_tree_int(cmd, activation_snapshot_autoextend_threshold_CFG, NULL) * DM_PERCENT_1;
		policy_amount =
			find_config_tree_int(cmd, activation_snapshot_autoextend_percent_CFG, NULL);
	}

	if (policy_threshold >= DM_PERCENT_100)
		return 1; /* nothing to do */

	if (lv_is_thin_pool(lv)) {
		if (!lv_thin_pool_percent(lv, 1, &percent))
			return_0;
		if ((DM_PERCENT_0 < percent && percent <= DM_PERCENT_100) &&
		    (percent > policy_threshold)) {
			if (!thin_pool_feature_supported(lv, THIN_FEATURE_METADATA_RESIZE)) {
				log_error_once("Online metadata resize for %s/%s is not supported.",
					       lv->vg->name, lv->name);
				return 0;
			}
			lp->poolmetadatasize = (first_seg(lv)->metadata_lv->size *
/*
 * build_parallel_areas_from_lv
 * @use_pvmove_parent_lv
 * @create_single_list
 *
 * For each segment in an LV, create a list of PVs used by the segment.
 * Thus, the returned list is really a list of segments (seg_pvs)
 * containing a list of PVs that are in use by that segment.
 *
 * use_pvmove_parent_lv: For pvmove we use the *parent* LV so we can
 *                       pick up stripes & existing mirrors etc.
 * create_single_list : Instead of creating a list of segments that
 *                      each contain a list of PVs, return a list
 *                      containing just one segment (i.e. seg_pvs)
 *                      that contains a list of all the PVs used by
 *                      the entire LV and all its segments.
 */
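/*
 * Sketch of a call (illustrative, assuming an existing struct
 * logical_volume *lv): create_single_list = 1 collapses the result
 * into a single seg_pvs covering the whole LV.
 *
 *	struct dm_list *pas;
 *
 *	if (!(pas = build_parallel_areas_from_lv(lv, 0, 1)))
 *		stack;
 */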
struct dm_list *build_parallel_areas_from_lv(struct logical_volume *lv,
					     unsigned use_pvmove_parent_lv,
					     unsigned create_single_list)
{
	struct cmd_context *cmd = lv->vg->cmd;
	struct dm_list *parallel_areas;
	struct seg_pvs *spvs = NULL;
	uint32_t current_le = 0;
	uint32_t raid_multiple;
	struct lv_segment *seg = first_seg(lv);
	dm_list_init(parallel_areas);

	do {
		if (!spvs || !create_single_list) {
			if (!(spvs = dm_pool_zalloc(cmd->mem, sizeof(*spvs)))) {
				log_error("allocation failed");
				return NULL;
			}

			dm_list_init(&spvs->pvs);
			dm_list_add(parallel_areas, &spvs->list);
		}
		spvs->le = current_le;
		spvs->len = lv->le_count - current_le;

		if (use_pvmove_parent_lv &&
		    !(seg = find_seg_by_le(lv, current_le))) {
			log_error("Failed to find segment for %s extent %" PRIu32,
				  lv->name, current_le);
			return NULL;
		}
					     override_lv_skip_flag)

	log_verbose("ACTIVATION_SKIP flag set for LV %s/%s, skipping activation.",
		    lv->vg->name, lv->name);
/* Greatest common divisor */
static unsigned long _gcd(unsigned long n1, unsigned long n2)
{
	unsigned long remainder;

	do {
		remainder = n1 % n2;
		n1 = n2;
		n2 = remainder;
	} while (n2);

	return n1;
}

/* Least common multiple */
static unsigned long _lcm(unsigned long n1, unsigned long n2)
{
	if (!n1 || !n2)
		return 0;
	return (n1 * n2) / _gcd(n1, n2);
}
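/*
 * Worked example: _gcd(4096, 6144) = 2048, so
 * _lcm(4096, 6144) = (4096 * 6144) / 2048 = 12288 - the smallest
 * value compatible with both operands.
 */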
static int _recalculate_pool_chunk_size_with_dev_hints(struct lvcreate_params *lp,
						       struct logical_volume *pool_lv)
{
	struct logical_volume *pool_data_lv;
	struct lv_segment *seg;
	struct physical_volume *pv;
	struct cmd_context *cmd = pool_lv->vg->cmd;
	unsigned long previous_hint = 0, hint = 0;
	uint32_t chunk_size = lp->chunk_size;
	uint32_t default_chunk_size;
	uint32_t min_chunk_size, max_chunk_size;

	if (lp->passed_args & PASS_ARG_CHUNK_SIZE)
		goto out;
	if (seg_is_thin_pool(lp)) {
		if (find_config_tree_int(cmd, allocation_thin_pool_chunk_size_CFG, NULL))
			goto out;

		min_chunk_size = DM_THIN_MIN_DATA_BLOCK_SIZE;
		max_chunk_size = DM_THIN_MAX_DATA_BLOCK_SIZE;
		default_chunk_size = get_default_allocation_thin_pool_chunk_size_CFG(cmd, NULL) * 2;
	} else if (seg_is_cache_pool(lp)) {
		if (find_config_tree_int(cmd, allocation_cache_pool_chunk_size_CFG, NULL))
			goto out;
		min_chunk_size = DM_CACHE_MIN_DATA_BLOCK_SIZE;
		max_chunk_size = DM_CACHE_MAX_DATA_BLOCK_SIZE;
		default_chunk_size = get_default_allocation_cache_pool_chunk_size_CFG(cmd, NULL) * 2;
	} else {
		log_error(INTERNAL_ERROR "%s is not a thin pool or cache pool",
			  pool_lv->name);
		return 0;
	}
	pool_data_lv = seg_lv(first_seg(pool_lv), 0);

	dm_list_iterate_items(seg, &pool_data_lv->segments) {
		pv = seg_pv(seg, 0);
		if (lp->thin_chunk_size_calc_policy == THIN_CHUNK_SIZE_CALC_METHOD_PERFORMANCE)
			hint = dev_optimal_io_size(cmd->dev_types, pv_dev(pv));
		else
			hint = dev_minimum_io_size(cmd->dev_types, pv_dev(pv));

		if (!hint)
			continue;
		if (previous_hint)
			hint = _lcm(previous_hint, hint);
		previous_hint = hint;
	}
	if (!hint) {
		log_debug_alloc("No usable device hint found while recalculating"
				" thin pool chunk size for %s.", pool_lv->name);
		goto out;
	}

	if ((hint < min_chunk_size) || (hint > max_chunk_size)) {
		log_debug_alloc("Calculated chunk size value of %ld sectors for"
				" thin pool %s is out of allowed range (%d-%d).",
				hint, pool_lv->name,
				min_chunk_size, max_chunk_size);
	} else
		chunk_size = (hint >= default_chunk_size) ?
			hint : default_chunk_size;

out:
	first_seg(pool_lv)->chunk_size = chunk_size;

	return 1;
}
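/*
 * Worked example for the function above (values illustrative): two
 * data-LV segments whose devices report I/O hints of 128 and 192
 * sectors accumulate hint = _lcm(128, 192) = 384. If 384 lies within
 * [min_chunk_size, max_chunk_size], the chunk size becomes
 * max(384, default_chunk_size); otherwise the default is kept.
 */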
static int _should_wipe_lv(struct lvcreate_params *lp, struct logical_volume *lv) {
int r = lp->zero | lp->wipe_signatures;
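	/*
	 * lp->zero and lp->wipe_signatures are 0/1 option flags here, so
	 * the bitwise OR behaves like a logical OR: the LV is considered
	 * for wiping if either option was requested.
	 */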
	if (!lv_extend(lv, lp->segtype,
		       lp->stripes, lp->stripe_size,
		       seg_is_pool(lp) ? lp->poolmetadataextents : lp->region_size,
		       seg_is_thin_volume(lp) ? lp->voriginextents : lp->extents,
		       thin_name, lp->pvh, lp->alloc, lp->approx_alloc))
	if (seg_is_cache_pool(lp)) {
		first_seg(lv)->chunk_size = lp->chunk_size;
		first_seg(lv)->feature_flags = lp->feature_flags;
		/* TODO: some calc_policy solution for cache ? */
		if (!recalculate_pool_chunk_size_with_dev_hints(lv, lp->passed_args,
								THIN_CHUNK_SIZE_CALC_METHOD_GENERIC))
			return_NULL;
	} else if (seg_is_thin_pool(lp)) {
		first_seg(lv)->chunk_size = lp->chunk_size;
		first_seg(lv)->zero_new_blocks = lp->zero ? 1 : 0;
		first_seg(lv)->discards = lp->discards;
		/* FIXME: use lowwatermark via lvm.conf global for all thinpools ? */
		first_seg(lv)->low_water_mark = 0;
		if (!recalculate_pool_chunk_size_with_dev_hints(lv, lp->passed_args,
								lp->thin_chunk_size_calc_policy))
			return_NULL;
	} else if (seg_is_thin_volume(lp)) {
		pool_lv = first_seg(lv)->pool_lv;

		if (!(first_seg(lv)->device_id =