/*
 * Lock scope: which kind of object the lock applies to.
 * LCK_ACTIVATION serialises activation commands; it occupies its own bit,
 * so the scope mask covers both the VG/LV bit and the activation bit.
 */
#define LCK_SCOPE_MASK	0x00001008U
#define LCK_VG		0x00000000U	/* Volume Group */
#define LCK_LV		0x00000008U	/* Logical Volume */
#define LCK_ACTIVATION	0x00001000U	/* Activation */
/*
 * Common lock-request values: scope bit combined with mode/flag bits
 * (LCK_NULL, LCK_READ, LCK_WRITE, LCK_UNLOCK, LCK_HOLD are defined
 * elsewhere in this header).
 */
#define LCK_NONE		(LCK_VG | LCK_NULL)

/* Exclusive per-LV activation serialisation lock. */
#define LCK_ACTIVATE_LOCK	(LCK_ACTIVATION | LCK_WRITE | LCK_HOLD)
#define LCK_ACTIVATE_UNLOCK	(LCK_ACTIVATION | LCK_UNLOCK)

/* Volume Group metadata locks. */
#define LCK_VG_READ		(LCK_VG | LCK_READ | LCK_HOLD)
#define LCK_VG_WRITE		(LCK_VG | LCK_WRITE | LCK_HOLD)
#define LCK_VG_UNLOCK		(LCK_VG | LCK_UNLOCK)
161
165
lock_vol(cmd, (lv)->lvid.s, flags | LCK_LV_CLUSTERED(lv), lv) : \
/*
 * Activation locks are wrapped around activation commands that have to
 * be processed atomically one-at-a-time.
 *
 * If a VG WRITE lock is held, an activation lock is redundant.
 *
 * FIXME Test and support this for thin and cache types.
 * FIXME Add cluster support.
 */
/* Activation locking is only usable for non-clustered, non-thin, non-cache LVs. */
#define lv_supports_activation_locking(lv) (!vg_is_clustered((lv)->vg) && !lv_is_thin_type(lv) && !lv_is_cache_type(lv))

/*
 * Take/release the per-LV activation lock.  When a VG write lock is already
 * held (and the LV supports activation locking) the lock is redundant, so
 * these succeed immediately without calling lock_vol().
 */
#define lock_activation(cmd, lv) (vg_write_lock_held() && lv_supports_activation_locking(lv) ? 1 : lock_vol(cmd, (lv)->lvid.s, LCK_ACTIVATE_LOCK, lv))
#define unlock_activation(cmd, lv) (vg_write_lock_held() && lv_supports_activation_locking(lv) ? 1 : lock_vol(cmd, (lv)->lvid.s, LCK_ACTIVATE_UNLOCK, lv))
/*
 * Place temporary exclusive 'activation' lock around an LV locking operation
 * to serialise it.  Returns 0 if the activation lock could not be obtained;
 * otherwise returns the result of the wrapped lock_lv_vol() call.
 * NOTE(review): body reconstructed from a garbled source span — confirm
 * against the project history before merging.
 */
#define lock_lv_vol_serially(cmd, lv, flags) \
({ \
	int rr = 0; \
\
	if (lock_activation((cmd), (lv))) { \
		rr = lock_lv_vol((cmd), (lv), (flags)); \
		unlock_activation((cmd), (lv)); \
	} \
	rr; \
})
164
195
#define unlock_vg(cmd, vol) \
166
197
if (is_real_vg(vol)) \
173
204
release_vg(vg); \
/*
 * Resume an LV and release the activation lock taken by the matching
 * suspend_lv().  (unlock_activation() succeeds without calling lock_vol()
 * when a VG write lock already serialises the operation.)
 * NOTE(review): source span contained two conflicting definitions; the
 * serialised variant is kept — confirm against project history.
 */
#define resume_lv(cmd, lv) \
({ \
	int rr = lock_lv_vol((cmd), (lv), LCK_LV_RESUME); \
\
	unlock_activation((cmd), (lv)); \
\
	rr; \
})
/* Resume only the origin LV of a snapshot. */
#define resume_lv_origin(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_RESUME | LCK_ORIGIN_ONLY)

/*
 * Resume while reverting precommitted metadata, then release the
 * activation lock taken by the matching suspend_lv().
 * NOTE(review): source span contained duplicate conflicting definitions;
 * the serialised variants are kept — confirm against project history.
 */
#define revert_lv(cmd, lv) \
({ \
	int rr = lock_lv_vol((cmd), (lv), LCK_LV_RESUME | LCK_REVERT); \
\
	unlock_activation((cmd), (lv)); \
\
	rr; \
})

/*
 * Suspend under the activation lock; the lock is released by the matching
 * resume_lv()/revert_lv().  Returns 0 if the activation lock is not obtained.
 */
#define suspend_lv(cmd, lv) \
	(lock_activation((cmd), (lv)) ? lock_lv_vol((cmd), (lv), LCK_LV_SUSPEND | LCK_HOLD) : 0)
/* Suspend only the origin LV of a snapshot (no activation lock taken). */
#define suspend_lv_origin(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_SUSPEND | LCK_HOLD | LCK_ORIGIN_ONLY)
/*
 * Activation/deactivation are serialised via the per-LV activation lock.
 * NOTE(review): source span contained duplicate definitions (plain
 * lock_lv_vol vs lock_lv_vol_serially); the serialised variants are kept.
 */
#define deactivate_lv(cmd, lv) lock_lv_vol_serially(cmd, lv, LCK_LV_DEACTIVATE)
#define activate_lv(cmd, lv) lock_lv_vol_serially(cmd, lv, LCK_LV_ACTIVATE | LCK_HOLD)
/* Exclusive local activation, serialised via the activation lock. */
#define activate_lv_excl_local(cmd, lv) \
	lock_lv_vol_serially(cmd, lv, LCK_LV_EXCLUSIVE | LCK_HOLD | LCK_LOCAL)
/* Exclusive remote activation (not serialised locally — lock is remote). */
#define activate_lv_excl_remote(cmd, lv) \
	lock_lv_vol(cmd, lv, LCK_LV_EXCLUSIVE | LCK_HOLD | LCK_REMOTE)
/* Exclusive activation of an LV (implementation elsewhere in the project). */
int activate_lv_excl(struct cmd_context *cmd, struct logical_volume *lv);
/* Local activation, serialised via the activation lock. */
#define activate_lv_local(cmd, lv) \
	lock_lv_vol_serially(cmd, lv, LCK_LV_ACTIVATE | LCK_HOLD | LCK_LOCAL)
/* Local deactivation, serialised via the activation lock. */
#define deactivate_lv_local(cmd, lv) \
	lock_lv_vol_serially(cmd, lv, LCK_LV_DEACTIVATE | LCK_LOCAL)
/* Drop any cached metadata for this VG via a special lock request. */
#define drop_cached_metadata(vg) \
	lock_vol((vg)->cmd, (vg)->name, LCK_VG_DROP_CACHE, NULL)
198
241
#define remote_commit_cached_metadata(vg) \