#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>
13
* build a vector of user pages
15
struct page **ceph_get_direct_page_vector(const char __user *data,
16
int num_pages, bool write_page)
22
pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
24
return ERR_PTR(-ENOMEM);
26
down_read(¤t->mm->mmap_sem);
27
while (got < num_pages) {
28
rc = get_user_pages(current, current->mm,
29
(unsigned long)data + ((unsigned long)got * PAGE_SIZE),
30
num_pages - got, write_page, 0, pages + got, NULL);
36
up_read(¤t->mm->mmap_sem);
42
ceph_put_page_vector(pages, got, false);
45
EXPORT_SYMBOL(ceph_get_direct_page_vector);
47
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
51
for (i = 0; i < num_pages; i++) {
53
set_page_dirty_lock(pages[i]);
58
EXPORT_SYMBOL(ceph_put_page_vector);
/*
 * Free pages allocated by ceph_alloc_page_vector(), then the vector.
 * Pages here were allocated (order 0), not user-pinned, so they are
 * released with __free_pages() rather than put_page().
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);
71
* allocate a vector new pages
73
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
78
pages = kmalloc(sizeof(*pages) * num_pages, flags);
80
return ERR_PTR(-ENOMEM);
81
for (i = 0; i < num_pages; i++) {
82
pages[i] = __page_cache_alloc(flags);
83
if (pages[i] == NULL) {
84
ceph_release_page_vector(pages, i);
85
return ERR_PTR(-ENOMEM);
90
EXPORT_SYMBOL(ceph_alloc_page_vector);
93
* copy user data into a page vector
95
int ceph_copy_user_to_page_vector(struct page **pages,
96
const char __user *data,
97
loff_t off, size_t len)
100
int po = off & ~PAGE_CACHE_MASK;
105
l = min_t(int, PAGE_CACHE_SIZE-po, left);
106
bad = copy_from_user(page_address(pages[i]) + po, data, l);
112
if (po == PAGE_CACHE_SIZE) {
119
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
121
int ceph_copy_to_page_vector(struct page **pages,
123
loff_t off, size_t len)
126
size_t po = off & ~PAGE_CACHE_MASK;
131
l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
132
memcpy(page_address(pages[i]) + po, data, l);
136
if (po == PAGE_CACHE_SIZE) {
143
EXPORT_SYMBOL(ceph_copy_to_page_vector);
145
int ceph_copy_from_page_vector(struct page **pages,
147
loff_t off, size_t len)
150
size_t po = off & ~PAGE_CACHE_MASK;
155
l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
156
memcpy(data, page_address(pages[i]) + po, l);
160
if (po == PAGE_CACHE_SIZE) {
167
EXPORT_SYMBOL(ceph_copy_from_page_vector);
170
* copy user data from a page vector into a user pointer
172
int ceph_copy_page_vector_to_user(struct page **pages,
174
loff_t off, size_t len)
177
int po = off & ~PAGE_CACHE_MASK;
182
l = min_t(int, left, PAGE_CACHE_SIZE-po);
183
bad = copy_to_user(data, page_address(pages[i]) + po, l);
190
if (po == PAGE_CACHE_SIZE)
197
EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
200
* Zero an extent within a page vector. Offset is relative to the
201
* start of the first page.
203
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
205
int i = off >> PAGE_CACHE_SHIFT;
207
off &= ~PAGE_CACHE_MASK;
209
dout("zero_page_vector_page %u~%u\n", off, len);
211
/* leading partial page? */
213
int end = min((int)PAGE_CACHE_SIZE, off + len);
214
dout("zeroing %d %p head from %d\n", i, pages[i],
216
zero_user_segment(pages[i], off, end);
220
while (len >= PAGE_CACHE_SIZE) {
221
dout("zeroing %d %p len=%d\n", i, pages[i], len);
222
zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
223
len -= PAGE_CACHE_SIZE;
226
/* trailing partial page? */
228
dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
229
zero_user_segment(pages[i], 0, len);
232
EXPORT_SYMBOL(ceph_zero_page_vector_range);