144
144
struct lvmcache_info *info;
146
info = info_from_pvid((const char *)&pv->id.uuid, 0);
147
return info ? dm_list_size(&info->mdas) : UINT64_C(0);
146
info = lvmcache_info_from_pvid((const char *)&pv->id.uuid, 0);
147
return info ? lvmcache_mda_count(info) : UINT64_C(0);
150
/*
 * lvmcache_foreach_mda() callback: increment *baton (a uint32_t counter)
 * for every metadata area that is not flagged ignored.
 *
 * Returns 1 so that iteration over the remaining mdas continues.
 *
 * NOTE(review): the increment and return statements were garbled out of
 * this extract; reconstructed from the counting contract visible in
 * pv_mda_used_count() — confirm against the original file.
 */
static int _count_unignored(struct metadata_area *mda, void *baton)
{
	uint32_t *count = baton;

	if (!mda_is_ignored(mda))
		(*count)++;

	return 1;
}
uint32_t pv_mda_used_count(const struct physical_volume *pv)
152
160
struct lvmcache_info *info;
153
struct metadata_area *mda;
154
161
uint32_t used_count=0;
156
info = info_from_pvid((const char *)&pv->id.uuid, 0);
163
info = lvmcache_info_from_pvid((const char *)&pv->id.uuid, 0);
159
dm_list_iterate_items(mda, &info->mdas) {
160
if (!mda_is_ignored(mda))
166
lvmcache_foreach_mda(info, _count_unignored, &used_count);
163
167
return used_count;
208
213
const char *pvid = (const char *)(&pv->id.uuid);
210
215
/* PVs could have 2 mdas of different sizes (rounding effect) */
211
if ((info = info_from_pvid(pvid, 0)))
212
min_mda_size = find_min_mda_size(&info->mdas);
216
if ((info = lvmcache_info_from_pvid(pvid, 0)))
217
min_mda_size = lvmcache_smallest_mda_size(info);
213
218
return min_mda_size;
221
static int _pv_mda_free(struct metadata_area *mda, void *baton) {
223
uint64_t *freespace = baton;
225
if (!mda->ops->mda_free_sectors)
228
mda_free = mda->ops->mda_free_sectors(mda);
229
if (mda_free < *freespace)
230
*freespace = mda_free;
216
234
uint64_t pv_mda_free(const struct physical_volume *pv)
218
236
struct lvmcache_info *info;
219
uint64_t freespace = UINT64_MAX, mda_free;
237
uint64_t freespace = UINT64_MAX;
220
238
const char *pvid = (const char *)&pv->id.uuid;
221
struct metadata_area *mda;
223
if ((info = info_from_pvid(pvid, 0)))
224
dm_list_iterate_items(mda, &info->mdas) {
225
if (!mda->ops->mda_free_sectors)
227
mda_free = mda->ops->mda_free_sectors(mda);
228
if (mda_free < freespace)
229
freespace = mda_free;
240
if ((info = lvmcache_info_from_pvid(pvid, 0)))
241
lvmcache_foreach_mda(info, _pv_mda_free, &freespace);
232
243
if (freespace == UINT64_MAX)
233
244
freespace = UINT64_C(0);
234
246
return freespace;
260
/*
 * State threaded through lvmcache_foreach_mda() into
 * _pv_mda_set_ignored_one(): the target ignored flag plus the format
 * instance's in-use / ignored / to-change mda lists.
 *
 * NOTE(review): the closing brace was garbled out of this extract;
 * restored here.
 */
struct _pv_mda_set_ignored_baton {
	unsigned mda_ignored;		/* new ignored state to apply */
	struct dm_list *mdas_in_use, *mdas_ignored, *mdas_to_change;
};
265
static int _pv_mda_set_ignored_one(struct metadata_area *mda, void *baton)
267
struct _pv_mda_set_ignored_baton *b = baton;
268
struct metadata_area *vg_mda, *tmda;
270
if (mda_is_ignored(mda) && !b->mda_ignored) {
271
/* Changing an ignored mda to one in_use requires moving it */
272
dm_list_iterate_items_safe(vg_mda, tmda, b->mdas_ignored)
273
if (mda_locns_match(mda, vg_mda)) {
274
mda_set_ignored(vg_mda, b->mda_ignored);
275
dm_list_move(b->mdas_in_use, &vg_mda->list);
279
dm_list_iterate_items_safe(vg_mda, tmda, b->mdas_in_use)
280
if (mda_locns_match(mda, vg_mda))
281
/* Don't move mda: needs writing to disk. */
282
mda_set_ignored(vg_mda, b->mda_ignored);
284
mda_set_ignored(mda, b->mda_ignored);
248
288
unsigned pv_mda_set_ignored(const struct physical_volume *pv, unsigned mda_ignored)
250
290
struct lvmcache_info *info;
251
struct metadata_area *mda, *vg_mda, *tmda;
252
struct dm_list *mdas_in_use, *mdas_ignored, *mdas_to_change;
291
struct _pv_mda_set_ignored_baton baton;
292
struct metadata_area *mda;
254
if (!(info = info_from_pvid((const char *)&pv->id.uuid, 0)))
294
if (!(info = lvmcache_info_from_pvid((const char *)&pv->id.uuid, 0)))
257
mdas_in_use = &pv->fid->metadata_areas_in_use;
258
mdas_ignored = &pv->fid->metadata_areas_ignored;
259
mdas_to_change = mda_ignored ? mdas_in_use : mdas_ignored;
297
baton.mda_ignored = mda_ignored;
298
baton.mdas_in_use = &pv->fid->metadata_areas_in_use;
299
baton.mdas_ignored = &pv->fid->metadata_areas_ignored;
300
baton.mdas_to_change = baton.mda_ignored ? baton.mdas_in_use : baton.mdas_ignored;
261
302
if (is_orphan(pv)) {
262
dm_list_iterate_items(mda, mdas_to_change)
263
mda_set_ignored(mda, mda_ignored);
303
dm_list_iterate_items(mda, baton.mdas_to_change)
304
mda_set_ignored(mda, baton.mda_ignored);
287
328
/* FIXME: Try not to update the cache here! Also, try to iterate over
288
329
* PV mdas only using the format instance's index somehow
289
330
* (i.e. try to avoid using mda_locn_match call). */
290
dm_list_iterate_items(mda, &info->mdas) {
291
if (mda_is_ignored(mda) && !mda_ignored)
292
/* Changing an ignored mda to one in_use requires moving it */
293
dm_list_iterate_items_safe(vg_mda, tmda, mdas_ignored)
294
if (mda_locns_match(mda, vg_mda)) {
295
mda_set_ignored(vg_mda, mda_ignored);
296
dm_list_move(mdas_in_use, &vg_mda->list);
299
dm_list_iterate_items_safe(vg_mda, tmda, mdas_in_use)
300
if (mda_locns_match(mda, vg_mda))
301
/* Don't move mda: needs writing to disk. */
302
mda_set_ignored(vg_mda, mda_ignored);
304
mda_set_ignored(mda, mda_ignored);
332
lvmcache_foreach_mda(info, _pv_mda_set_ignored_one, &baton);