#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "incore.h"

#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;

struct gfs2_glock_iter {
        int hash;                       /* hash bucket index */
        struct gfs2_sbd *sdp;           /* incore superblock */

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

static struct dentry *gfs2_root;
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
    defined(CONFIG_PROVE_LOCKING)
# define GL_HASH_LOCK_SZ 256
# define GL_HASH_LOCK_SZ 4096
# define GL_HASH_LOCK_SZ 2048
# define GL_HASH_LOCK_SZ 1024
# define GL_HASH_LOCK_SZ 512
# define GL_HASH_LOCK_SZ 256

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];

#else /* not SMP, so no spinlocks required */

static inline rwlock_t *gl_lock_addr(unsigned int x)
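The comment above explains the pre-RCU locking scheme: one hash value indexes both a large array of chain heads and a much smaller array of rwlocks, so each lock covers several chains. A minimal standalone sketch of that idea follows; the names and sizes are illustrative, not taken from this file, and the rwlocks would still need rwlock_init() at startup just as gl_hash_locks does.

#include <linux/list.h>
#include <linux/spinlock.h>

#define EG_CHAIN_COUNT 8192                 /* many hash chains (assumed size) */
#define EG_LOCK_COUNT   256                 /* far fewer locks (assumed size)  */

static struct hlist_head eg_chains[EG_CHAIN_COUNT];
static rwlock_t eg_chain_locks[EG_LOCK_COUNT];

/* One hash value, two different masks: chain index and lock index. */
static inline struct hlist_head *eg_chain(unsigned int hash)
{
        return &eg_chains[hash & (EG_CHAIN_COUNT - 1)];
}

static inline rwlock_t *eg_chain_lock(unsigned int hash)
{
        return &eg_chain_locks[hash & (EG_LOCK_COUNT - 1)];
}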
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number

 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.

static void glock_free(struct gfs2_glock *gl)

static inline void spin_lock_bucket(unsigned int hash)
        hlist_bl_lock(&gl_hash_table[hash]);

static inline void spin_unlock_bucket(unsigned int hash)
        hlist_bl_unlock(&gl_hash_table[hash]);
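spin_lock_bucket()/spin_unlock_bucket() wrap hlist_bl_lock()/hlist_bl_unlock(), which take a bit spinlock stored in bit 0 of each hlist_bl_head pointer, so the per-bucket lock costs no extra memory. A hedged writer-side sketch of the same idiom, using a hypothetical table and entry type:

#include <linux/list_bl.h>
#include <linux/rculist_bl.h>

struct blh_entry {
        struct hlist_bl_node e_list;
};

static struct hlist_bl_head blh_table[256];

static void blh_insert(struct blh_entry *e, unsigned int bucket)
{
        hlist_bl_lock(&blh_table[bucket]);      /* bit spinlock on the list head */
        hlist_bl_add_head_rcu(&e->e_list, &blh_table[bucket]);
        hlist_bl_unlock(&blh_table[bucket]);
}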
static void gfs2_glock_dealloc(struct rcu_head *rcu)
        struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

        if (gl->gl_ops->go_flags & GLOF_ASPACE)
                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
        kmem_cache_free(gfs2_glock_cachep, gl);

void gfs2_glock_free(struct gfs2_glock *gl)
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);
        struct kmem_cache *cachep = gfs2_glock_cachep;

        GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
        trace_gfs2_glock_put(gl);
        cachep = gfs2_glock_aspace_cachep;
        sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
        call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
        if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                wake_up(&sdp->sd_glock_wait);
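gfs2_glock_free() no longer frees the glock directly: it hands the embedded rcu_head to call_rcu(), and gfs2_glock_dealloc() returns the memory to the right kmem_cache only after a grace period, so lockless readers still walking the hash chain never touch freed memory. The general shape of that pattern, as a sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct rf_obj {
        /* ... payload ... */
        struct rcu_head rf_rcu;
};

/* Runs only after every pre-existing RCU read-side critical section ends. */
static void rf_obj_dealloc(struct rcu_head *rcu)
{
        struct rf_obj *obj = container_of(rcu, struct rf_obj, rf_rcu);

        kfree(obj);
}

static void rf_obj_free(struct rf_obj *obj)
{
        /* caller has already unlinked @obj from all RCU-visible structures */
        call_rcu(&obj->rf_rcu, rf_obj_dealloc);
}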
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
        may_reclaim = (demote_ok(gl) &&
                       (atomic_read(&gl->gl_ref) == 1 ||
                        (gl->gl_name.ln_type == LM_TYPE_INODE &&
                         atomic_read(&gl->gl_ref) <= 2)));

        spin_lock(&lru_lock);
        if (list_empty(&gl->gl_lru) && may_reclaim) {
                list_add_tail(&gl->gl_lru, &lru_list);
        if (!list_empty(&gl->gl_lru))
                list_del_init(&gl->gl_lru);
                atomic_inc(&lru_count);
        list_add_tail(&gl->gl_lru, &lru_list);
        set_bit(GLF_LRU, &gl->gl_flags);
        spin_unlock(&lru_lock);

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
        spin_lock(&lru_lock);
        if (!list_empty(&gl->gl_lru)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
                clear_bit(GLF_LRU, &gl->gl_flags);
        spin_unlock(&lru_lock);

 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
        gfs2_glock_add_to_lru(gl);
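Per the comment above, a demotable glock is added to, or moved to, the tail of the LRU list so the shrinker scans the coldest entries first. That move-to-tail step reduces to a delete-if-queued followed by an add-tail under the LRU lock; roughly, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(demo_lru);
static DEFINE_SPINLOCK(demo_lru_lock);

static void demo_lru_touch(struct list_head *item)
{
        spin_lock(&demo_lru_lock);
        if (!list_empty(item))                  /* already queued: unlink first */
                list_del_init(item);
        list_add_tail(item, &demo_lru);         /* tail = most recently touched */
        spin_unlock(&demo_lru_lock);
}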
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put

int gfs2_glock_put(struct gfs2_glock *gl)
void gfs2_glock_put(struct gfs2_glock *gl)
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
                hlist_del(&gl->gl_list);
                if (!list_empty(&gl->gl_lru)) {
                        list_del_init(&gl->gl_lru);
                        atomic_dec(&lru_count);
                spin_unlock(&lru_lock);
                write_unlock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                spin_lock_bucket(gl->gl_hash);
                hlist_bl_del_rcu(&gl->gl_list);
                spin_unlock_bucket(gl->gl_hash);
                gfs2_glock_remove_from_lru(gl);
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
                trace_gfs2_glock_put(gl);
                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
        spin_lock(&gl->gl_spin);
        gfs2_glock_schedule_for_reclaim(gl);
        spin_unlock(&gl->gl_spin);
        write_unlock(gl_lock_addr(gl->gl_hash));
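In the RCU version the release path mirrors the lookup path: the final gfs2_glock_put() uses atomic_dec_and_test(), unlinks the glock from its bucket with hlist_bl_del_rcu() under the bucket bit spinlock, and only then lets the RCU-deferred free run. A condensed, hedged sketch of that ordering with hypothetical types (the same "ge_" structures are reused by the lookup sketch further down):

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/list_bl.h>
#include <linux/rculist_bl.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ge_entry {
        struct hlist_bl_node ge_list;
        atomic_t ge_ref;
        u64 ge_key;
        unsigned int ge_bucket;         /* index into ge_table, < 256 */
        struct rcu_head ge_rcu;
};

static struct hlist_bl_head ge_table[256];

static void ge_dealloc(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct ge_entry, ge_rcu));
}

static void ge_put(struct ge_entry *ge)
{
        if (!atomic_dec_and_test(&ge->ge_ref))
                return;
        /* last reference: unpublish under the bucket lock, free after a grace period */
        hlist_bl_lock(&ge_table[ge->ge_bucket]);
        hlist_bl_del_rcu(&ge->ge_list);
        hlist_bl_unlock(&ge_table[ge->ge_bucket]);
        call_rcu(&ge->ge_rcu, ge_dealloc);
}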
                                       const struct lm_lockname *name)
        struct gfs2_glock *gl;
        struct hlist_node *h;
        struct hlist_bl_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
        hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                if (gl->gl_sbd != sdp)
                atomic_inc(&gl->gl_ref);
                if (atomic_inc_not_zero(&gl->gl_ref))
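The reworked search_bucket() walks the chain with hlist_bl_for_each_entry_rcu() and claims a reference with atomic_inc_not_zero(), so entries whose refcount has already hit zero (and are merely waiting out their RCU grace period) are skipped rather than revived. A hedged reader-side sketch, continuing the ge_ structures from the sketch above; the caller is assumed to hold rcu_read_lock() only inside this helper:

static struct ge_entry *ge_lookup(unsigned int bucket, u64 key)
{
        struct ge_entry *ge;
        struct hlist_bl_node *pos;

        rcu_read_lock();
        hlist_bl_for_each_entry_rcu(ge, pos, &ge_table[bucket], ge_list) {
                if (ge->ge_key != key)
                        continue;
                if (atomic_inc_not_zero(&ge->ge_ref)) { /* skip dying entries */
                        rcu_read_unlock();
                        return ge;
                }
        }
        rcu_read_unlock();
        return NULL;
}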
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
        gfs2_glock_hold(gl);
        if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
            gl->gl_state == LM_ST_DEFERRED) &&
            !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                lck_flags |= LM_FLAG_TRY_1CB;
        if (sdp->sd_lockstruct.ls_ops->lm_lock) {
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);

        /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
        inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
        inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
        inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
        if (inode && !IS_ERR(inode)) {

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;

                holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
                clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl, 0);
        spin_unlock(&gl->gl_spin);
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        struct address_space *mapping;
        struct kmem_cache *cachep;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (glops->go_flags & GLOF_ASPACE)
                gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
                cachep = gfs2_glock_aspace_cachep;
                gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
                cachep = gfs2_glock_cachep;
        gl = kmem_cache_alloc(cachep, GFP_KERNEL);

        mapping->writeback_index = 0;

        write_lock(gl_lock_addr(hash));
        spin_lock_bucket(hash);
        tmp = search_bucket(hash, sdp, &name);
        write_unlock(gl_lock_addr(hash));
        spin_unlock_bucket(hash);
        kmem_cache_free(cachep, gl);
        atomic_dec(&sdp->sd_glock_disposal);
        hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
        write_unlock(gl_lock_addr(hash));
        hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
        spin_unlock_bucket(hash);
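gfs2_glock_get() allocates the new glock outside any lock, then takes the bucket lock and searches again: if another CPU inserted the same lock name in the meantime the freshly allocated glock is freed, otherwise it is published with hlist_bl_add_head_rcu(). Continuing the ge_ sketches above (ge_lookup() returns with a reference held), the race-handling skeleton looks roughly like this:

static struct ge_entry *ge_get(unsigned int bucket, u64 key)
{
        struct ge_entry *ge, *tmp;

        ge = ge_lookup(bucket, key);
        if (ge)
                return ge;

        ge = kzalloc(sizeof(*ge), GFP_KERNEL);  /* allocate outside the lock */
        if (!ge)
                return NULL;
        ge->ge_key = key;
        ge->ge_bucket = bucket;
        atomic_set(&ge->ge_ref, 1);

        hlist_bl_lock(&ge_table[bucket]);
        tmp = ge_lookup(bucket, key);           /* did someone beat us to it? */
        if (tmp) {
                hlist_bl_unlock(&ge_table[bucket]);
                kfree(ge);                      /* lost the race: use @tmp */
                return tmp;
        }
        hlist_bl_add_head_rcu(&ge->ge_list, &ge_table[bucket]);
        hlist_bl_unlock(&ge_table[bucket]);
        return ge;
}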
                insert_pt = &gh2->gh_list;
        set_bit(GLF_QUEUED, &gl->gl_flags);
        trace_gfs2_glock_queue(gh, 1);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
        trace_gfs2_glock_queue(gh, 1);
        list_add_tail(&gh->gh_list, insert_pt);
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);
        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        if ((LM_FLAG_NOEXP & gh->gh_flags) &&

            !test_bit(GLF_DEMOTE, &gl->gl_flags))
        if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
                __gfs2_glock_schedule_for_reclaim(gl);
        trace_gfs2_glock_queue(gh, 0);
        spin_unlock(&gl->gl_spin);
        if (likely(fast_path))
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
        gfs2_glock_dq_uninit(&ghs[num_gh]);

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
static int gfs2_shrink_glock_memory(struct shrinker *shrink,
                                    struct shrink_control *sc)
        struct gfs2_glock *gl;
        int may_demote;
        int nr_skipped = 0;
        int nr = sc->nr_to_scan;
        gfp_t gfp_mask = sc->gfp_mask;
        LIST_HEAD(skipped);

        while (nr && !list_empty(&lru_list)) {
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                clear_bit(GLF_LRU, &gl->gl_flags);
                atomic_dec(&lru_count);

                /* Test for being demotable */

                list_add(&gl->gl_lru, &skipped);
                set_bit(GLF_LRU, &gl->gl_flags);
        list_splice(&skipped, &lru_list);
        atomic_add(nr_skipped, &lru_count);
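The shrinker callback is converted here from the old (shrinker, nr, gfp_mask) signature to the struct shrink_control interface, from which it reads sc->nr_to_scan and sc->gfp_mask. A hedged sketch of how a shrinker of this era is declared and registered; the object counter and the actual reclaim loop are stand-ins:

#include <linux/mm.h>
#include <linux/atomic.h>

static atomic_t gshr_cached = ATOMIC_INIT(0);   /* stand-in object counter */

static int gshr_count(void)
{
        return atomic_read(&gshr_cached);
}

static int gshr_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        int nr = sc->nr_to_scan;                /* how many objects to try to free */
        gfp_t gfp_mask = sc->gfp_mask;          /* allocation context of the caller */

        if (nr == 0)
                return gshr_count();            /* nr_to_scan == 0 means "just report" */
        /* ... free up to nr objects, honouring gfp_mask ... */
        return gshr_count();
}

static struct shrinker gshr_shrinker = {
        .shrink = gshr_shrink,
        .seeks  = DEFAULT_SEEKS,
};

/* register_shrinker(&gshr_shrinker) at init, unregister_shrinker() at exit */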
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
                           unsigned int hash)
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;
        struct gfs2_glock *gl;
        struct hlist_bl_head *head = &gl_hash_table[hash];
        struct hlist_bl_node *pos;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        if (!sdp || gl->gl_sbd == sdp) {
                gfs2_glock_hold(gl);
                read_unlock(gl_lock_addr(hash));
                gfs2_glock_put(prev);
        hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
                if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
        read_lock(gl_lock_addr(hash));
        if (gl->gl_list.next == NULL)
        gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        read_unlock(gl_lock_addr(hash));
        gfs2_glock_put(prev);
        cond_resched();

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(examiner, sdp, x);
static void clear_glock(struct gfs2_glock *gl)
        spin_lock(&lru_lock);
        if (!list_empty(&gl->gl_lru)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
        spin_unlock(&lru_lock);
        gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_spin);
        if (gl->gl_state != LM_ST_UNLOCKED)

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(thaw_glock, sdp, x);
        glock_hash_walk(thaw_glock, sdp);

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
        spin_lock(&gl->gl_spin);
        ret = __dump_glock(seq, gl);
        spin_unlock(&gl->gl_spin);

static void dump_glock_func(struct gfs2_glock *gl)
        dump_glock(NULL, gl);

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(clear_glock, sdp, x);
        glock_hash_walk(clear_glock, sdp);
        flush_workqueue(glock_workqueue);
        wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
        gfs2_dump_lockstate(sdp);
        glock_hash_walk(dump_glock_func, sdp);
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)

static const char *gflags2str(char *buf, const unsigned long *gflags)
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
        const unsigned long *gflags = &gl->gl_flags;
        if (test_bit(GLF_LOCK, gflags))
        if (test_bit(GLF_DEMOTE, gflags))

        dtime *= 1000000/HZ; /* demote time in uSec */
        if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
                       state2str(gl->gl_state),
                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number,
                       gflags2str(gflags_buf, &gl->gl_flags),
                       gflags2str(gflags_buf, gl),
                       state2str(gl->gl_target),
                       state2str(gl->gl_demote_state), dtime,
                       atomic_read(&gl->gl_ail_count),
                       atomic_read(&gl->gl_revokes),
                       atomic_read(&gl->gl_ref));

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
        spin_lock(&gl->gl_spin);
        ret = __dump_glock(seq, gl);
        spin_unlock(&gl->gl_spin);
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 * @ub: the buffer to copy the information into
 *
 * If @ub is NULL, dump the lockstate to the console.

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
        struct gfs2_glock *gl;
        struct hlist_node *h;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                read_lock(gl_lock_addr(x));
                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                        error = dump_glock(NULL, gl);
                read_unlock(gl_lock_addr(x));
int __init gfs2_glock_init(void)
        for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
#ifdef GL_HASH_LOCK_SZ
        for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
                rwlock_init(&gl_hash_locks[i]);
                INIT_HLIST_BL_HEAD(&gl_hash_table[i]);

        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);

        destroy_workqueue(gfs2_delete_workqueue);
static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
        return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
                              struct gfs2_glock, gl_list);

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
        return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
                              struct gfs2_glock, gl_list);

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(gi->hash));
        gi->gl = hlist_entry(gl->gl_list.next,
                             struct gfs2_glock, gl_list);
        gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
                             struct gfs2_glock, gl_list);
        gfs2_glock_hold(gi->gl);
        read_unlock(gl_lock_addr(gi->hash));
        while (gi->gl == NULL) {
                if (gi->hash >= GFS2_GL_HASH_SIZE)
                read_lock(gl_lock_addr(gi->hash));
                gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
                                     struct gfs2_glock, gl_list);
                gfs2_glock_hold(gi->gl);
                read_unlock(gl_lock_addr(gi->hash));
        if (gi->sdp != gi->gl->gl_sbd)
        gi->gl = glock_hash_next(gl);
        gi->gl = glock_hash_chain(gi->hash);
        while (gi->gl == NULL) {
                if (gi->hash >= GFS2_GL_HASH_SIZE) {
                gi->gl = glock_hash_chain(gi->hash);
        /* Skip entries for other sb and dead entries */
        } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
        gfs2_glock_put(gi->gl);

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n = *pos;

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
        if (gfs2_glock_iter_next(gi))

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
        struct gfs2_glock_iter *gi = seq->private;
        gfs2_glock_iter_free(gi);

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
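gfs2_glock_seq_start(), gfs2_glock_seq_stop() and gfs2_glock_seq_show() are three of the four handlers of a seq_file iterator; they are normally tied together in a struct seq_operations and exposed through a debugfs file. A hedged sketch of how that wiring typically looks; the ->next handler and the demo_* open/file_operations names are assumed for illustration, and the real file's open routine would also still have to point the iterator's sdp field at the right superblock:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

/* gfs2_glock_seq_next() is the companion ->next handler, not shown in this excerpt. */
static const struct seq_operations demo_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static int demo_glocks_open(struct inode *inode, struct file *file)
{
        /* allocates seq->private (the gfs2_glock_iter) alongside the seq_file */
        return seq_open_private(file, &demo_glock_seq_ops,
                                sizeof(struct gfs2_glock_iter));
}

static const struct file_operations demo_glocks_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_glocks_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};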