~ubuntu-branches/ubuntu/precise/linux-lowlatency/precise

Viewing changes to include/linux/buffer_head.h

  • Committer: Package Import Robot
  • Author(s): Alessio Igor Bogani
  • Date: 2011-10-26 11:13:05 UTC
  • Revision ID: package-import@ubuntu.com-20111026111305-tz023xykf0i6eosh
Tags: upstream-3.2.0
Import upstream version 3.2.0

/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */
        BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
                          * IO completion of other buffers in the page
                          */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer error printks to be quiet */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};
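
/*
 * A minimal sketch of how another entity claims a private bit: number it
 * from BH_PrivateStart and emit helpers with the BUFFER_FNS() macro defined
 * further down, as journaling code does in practice. BH_MyFsPinned and the
 * myfs naming are hypothetical.
 */
enum myfs_state_bits {
        BH_MyFsPinned = BH_PrivateStart,        /* hypothetical private bit */
};

BUFFER_FNS(MyFsPinned, myfs_pinned)     /* set_buffer_myfs_pinned() etc. */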
 
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* start block number */
        size_t b_size;                  /* size of mapping */
        char *b_data;                   /* pointer to data within the page */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
        struct address_space *b_assoc_map;      /* mapping this buffer is
                                                   associated with */
        atomic_t b_count;               /* users using this buffer_head */
};
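
/*
 * A minimal sketch of how these fields hang together: sb_bread() (defined
 * below) returns a referenced descriptor whose b_data points into b_page,
 * and brelse() drops the b_count reference. myfs_dump_block and its
 * arguments are hypothetical.
 */
static inline void myfs_dump_block(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh = sb_bread(sb, blocknr); /* read + reference */

        if (!bh)
                return;
        pr_info("block %llu, %zu bytes at page offset %lu\n",
                (unsigned long long)bh->b_blocknr, bh->b_size,
                bh_offset(bh));         /* b_data lives inside bh->b_page */
        brelse(bh);                     /* drop the b_count reference */
}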
 
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)                                           \
static inline void set_buffer_##name(struct buffer_head *bh)            \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)          \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static inline int buffer_##name(const struct buffer_head *bh)           \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}
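
/*
 * For reference, the BUFFER_FNS(Dirty, dirty) invocation further down
 * expands to exactly these three helpers (written out here by hand for
 * illustration; the real definitions come from the macro):
 */
static inline void set_buffer_dirty(struct buffer_head *bh)
{
        set_bit(BH_Dirty, &(bh)->b_state);
}
static inline void clear_buffer_dirty(struct buffer_head *bh)
{
        clear_bit(BH_Dirty, &(bh)->b_state);
}
static inline int buffer_dirty(const struct buffer_head *bh)
{
        return test_bit(BH_Dirty, &(bh)->b_state);
}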
 
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                       \
static inline int test_set_buffer_##name(struct buffer_head *bh)        \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static inline int test_clear_buffer_##name(struct buffer_head *bh)      \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}                                                                       \

/*
 * Emit the buffer bitops functions.   Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
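
/*
 * A minimal usage sketch: the TAS_* variants return the old bit value, so a
 * caller can both change state and learn who won the race without taking a
 * lock. myfs_note_dirty is hypothetical.
 */
static inline void myfs_note_dirty(struct buffer_head *bh)
{
        if (!buffer_uptodate(bh))
                return;                         /* nothing valid to write */
        if (!test_set_buffer_dirty(bh))         /* 0 => we dirtied it */
                pr_debug("buffer %llu newly dirtied\n",
                         (unsigned long long)bh->b_blocknr);
}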
 
#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)        mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)
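
/*
 * A minimal sketch of the usual walk over a page's buffer ring: b_this_page
 * is circular, so iterate until it wraps back to the head.
 * myfs_page_all_uptodate is hypothetical.
 */
static inline int myfs_page_all_uptodate(struct page *page)
{
        struct buffer_head *bh, *head;

        if (!page_has_buffers(page))
                return 0;
        bh = head = page_buffers(page);
        do {
                if (!buffer_uptodate(bh))
                        return 0;
                bh = bh->b_this_page;   /* circular list wraps to head */
        } while (bh != head);
        return 1;
}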
 
/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                        unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
                        unsigned size);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int rw);
void write_dirty_buffer(struct buffer_head *bh, int rw);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc, bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
                                unsigned long from);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, unsigned, struct page **, void **,
                        get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
        if (err == 0)
                return VM_FAULT_LOCKED;
        if (err == -EFAULT)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err == -EAGAIN)
                return VM_FAULT_RETRY;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
}
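
/*
 * A minimal sketch of the intended use: a filesystem ->page_mkwrite() built
 * on __block_page_mkwrite() converts the errno into a VM_FAULT_* code with
 * the helper above. myfs_page_mkwrite and myfs_get_block are hypothetical.
 */
static inline int myfs_page_mkwrite(struct vm_area_struct *vma,
                                    struct vm_fault *vmf)
{
        int err = __block_page_mkwrite(vma, vmf, myfs_get_block);

        return block_page_mkwrite_return(err);  /* 0 => VM_FAULT_LOCKED */
}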
 
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
                                struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
                struct buffer_head *head)
{
        page_cache_get(page);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long)head);
}
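
/*
 * A minimal sketch of what create_empty_buffers() (declared above) boils
 * down to, minus its locking and state handling: allocate a NULL-terminated
 * chain of buffer_heads, close it into a ring, then publish it through
 * page->private. myfs_attach_empty_buffers is hypothetical.
 */
static inline void myfs_attach_empty_buffers(struct page *page,
                                             unsigned long size)
{
        struct buffer_head *head, *bh, *tail;

        head = alloc_page_buffers(page, size, 1);       /* 1 => retry */
        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;   /* chain ends with NULL here */
        } while (bh);
        tail->b_this_page = head;       /* close the circular ring */
        attach_page_buffers(page, head);        /* also grabs a page ref */
}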
 
static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
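
/*
 * A minimal sketch contrasting the helpers above: sb_getblk() takes a
 * reference without any disk read, which fits a block about to be
 * overwritten wholesale. lock_buffer() is defined just below;
 * myfs_zero_block is hypothetical.
 */
static inline void myfs_zero_block(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh = sb_getblk(sb, blocknr);

        if (!bh)
                return;
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);      /* new contents, no read */
        set_buffer_uptodate(bh);                /* contents now valid */
        mark_buffer_dirty(bh);                  /* schedule writeback */
        unlock_buffer(bh);
        brelse(bh);
}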
 
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
        bh->b_size = sb->s_blocksize;
}
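
/*
 * A minimal sketch of a get_block_t implementation reporting its mapping
 * through map_bh(): this assumes file blocks sit contiguously after a
 * made-up layout constant, myfs_data_start; myfs_get_block is hypothetical.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create)
{
        sector_t phys = myfs_data_start + iblock;       /* hypothetical */

        map_bh(bh_result, inode->i_sb, phys);   /* BH_Mapped, b_bdev, b_size */
        return 0;
}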
 
static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh))
                __wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}
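
/*
 * A minimal sketch of the usual discipline for a synchronous metadata
 * update, combining the locking helpers above with sync_dirty_buffer()
 * declared earlier. myfs_sync_update and its callback are hypothetical.
 */
static inline int myfs_sync_update(struct buffer_head *bh,
                                   void (*update)(struct buffer_head *))
{
        lock_buffer(bh);        /* may sleep; excludes buffer I/O */
        update(bh);             /* modify bh->b_data */
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        return sync_dirty_buffer(bh);   /* write and wait; 0 on success */
}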
 
extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */