-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
			      sector_t isect, struct page *page,
			      struct pnfs_block_extent *be,
			      void (*end_io)(struct bio *, int err),
-			      struct parallel_io *par)
+			      struct parallel_io *par,
+			      unsigned int offset, int len)
 {
+	isect = isect + (offset >> SECTOR_SHIFT);
+	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
+		npg, rw, (unsigned long long)isect, offset, len);
[...]
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
-	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
[...]
 }
+
+static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+				      sector_t isect, struct page *page,
+				      struct pnfs_block_extent *be,
+				      void (*end_io)(struct bio *, int err),
+				      struct parallel_io *par)
+{
+	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
+				  end_io, par, 0, PAGE_CACHE_SIZE);
+}

 /* This is basically copied from mpage_end_io_read */
 static void bl_end_io_read(struct bio *bio, int err)
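
/*
 * Editor's aside: a minimal userspace sketch of the sector arithmetic that
 * do_add_page_to_bio() now performs, not part of the patch. SECTOR_SHIFT is
 * 9 in the kernel (512-byte sectors); a 4096-byte page is assumed. All
 * demo_ names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_SECTOR_SHIFT 9

/* Advance a starting sector by a byte offset within the page, mirroring
 * "isect = isect + (offset >> SECTOR_SHIFT)" above. */
static uint64_t demo_advance_isect(uint64_t isect, unsigned int offset)
{
	return isect + (offset >> DEMO_SECTOR_SHIFT);
}

int main(void)
{
	/* A 1536-byte offset into the page is exactly 3 sectors. */
	assert(demo_advance_isect(100, 1536) == 103);
	/* The bl_add_page_to_bio() wrapper passes offset 0 and a full
	 * page, so the starting sector is unchanged, as before. */
	assert(demo_advance_isect(100, 0) == 100);
	return 0;
}
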
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
+	size_t bytes_left = rdata->args.count;
+	unsigned int pg_offset, pg_len;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
+	const bool is_dio = (header->dreq != NULL);

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
			extent_length = min(extent_length, cow_length);
[...]
+		if (is_dio) {
+			pg_offset = f_offset & ~PAGE_CACHE_MASK;
+			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
+				pg_len = PAGE_CACHE_SIZE - pg_offset;
+			else
+				pg_len = bytes_left;
+
+			f_offset += pg_len;
+			bytes_left -= pg_len;
+			isect += (pg_offset >> SECTOR_SHIFT);
+		} else {
+			pg_offset = 0;
+			pg_len = PAGE_CACHE_SIZE;
+		}
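
/*
 * Editor's aside: how the DIO read path clamps each loop iteration to the
 * current page. In the kernel, PAGE_CACHE_MASK is ~(PAGE_SIZE - 1), so
 * "f_offset & ~PAGE_CACHE_MASK" is the byte offset within the page. A
 * userspace sketch with hypothetical demo_ names, assuming 4096-byte pages:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

struct demo_seg { unsigned int pg_offset, pg_len; };

static struct demo_seg demo_clamp(uint64_t f_offset, size_t bytes_left)
{
	struct demo_seg s;

	s.pg_offset = f_offset & ~DEMO_PAGE_MASK;
	if (s.pg_offset + bytes_left > DEMO_PAGE_SIZE)
		s.pg_len = DEMO_PAGE_SIZE - s.pg_offset; /* rest of page */
	else
		s.pg_len = bytes_left;                   /* final piece */
	return s;
}

int main(void)
{
	/* 1000 bytes starting 3600 bytes into a page span two pages:
	 * 496 bytes this iteration, 504 on the next. */
	struct demo_seg s = demo_clamp(4096 + 3600, 1000);
	assert(s.pg_offset == 3600 && s.pg_len == 496);
	s = demo_clamp(4096 + 4096, 504);
	assert(s.pg_offset == 0 && s.pg_len == 504);
	return 0;
}
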
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
-			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
+			zero_user_segment(pages[i], pg_offset, pg_len);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
-			bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
+			bio = do_add_page_to_bio(bio, rdata->pages.npages - i,
						 READ,
						 isect, pages[i], be_read,
-						 bl_end_io_read, par);
+						 bl_end_io_read, par,
+						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
-		isect += PAGE_CACHE_SECTORS;
+		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= PAGE_CACHE_SECTORS;
[...]
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		rdata->res.eof = 1;
-		rdata->res.count = header->inode->i_size - f_offset;
+		rdata->res.count = header->inode->i_size - rdata->args.offset;
	} else {
-		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
+		rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
	}
[...]
	bl_put_extent(be);
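
/*
 * Editor's aside: why res.count switches from f_offset to
 * rdata->args.offset. The DIO path above now advances f_offset page by
 * page, so after the loop it no longer names the start of the request.
 * A sketch with hypothetical numbers, not part of the patch:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t args_offset = 8192;       /* where the read started */
	uint64_t f_offset = args_offset;   /* cursor advanced by the loop */
	uint64_t isect_bytes;              /* isect << SECTOR_SHIFT at end */

	/* Two 4096-byte pages consumed; f_offset has moved past them. */
	f_offset += 4096;
	f_offset += 4096;
	isect_bytes = 16384;

	/* Correct: bytes returned, measured from the original offset. */
	assert(isect_bytes - args_offset == 8192);
	/* Wrong once f_offset is mutated: would report zero bytes read. */
	assert(isect_bytes - f_offset == 0);
	return 0;
}
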
+static void
+bl_read_single_end_io(struct bio *bio, int error)
+{
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct page *page = bvec->bv_page;
+
+	/* Only one page in bvec */
+	unlock_page(page);
+}
+
+static int
+bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
+		    unsigned int offset, unsigned int len)
+{
+	struct bio *bio;
+	struct page *shadow_page;
+	sector_t isect;
+	char *kaddr, *kshadow_addr;
+	int ret = 0;
+
+	dprintk("%s: offset %u len %u\n", __func__, offset, len);
+
+	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+	if (shadow_page == NULL)
+		return -ENOMEM;
+
+	bio = bio_alloc(GFP_NOIO, 1);
[...]
+	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
+		(offset / SECTOR_SIZE);
+	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+	bio->bi_bdev = be->be_mdev;
+	bio->bi_end_io = bl_read_single_end_io;
+
+	lock_page(shadow_page);
+	if (bio_add_page(bio, shadow_page,
+			 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
+		unlock_page(shadow_page);
[...]
+	}
+
+	submit_bio(READ, bio);
+	wait_on_page_locked(shadow_page);
+	if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
+		ret = -EIO;
+	} else {
+		kaddr = kmap_atomic(page);
+		kshadow_addr = kmap_atomic(shadow_page);
+		memcpy(kaddr + offset, kshadow_addr + offset, len);
+		kunmap_atomic(kshadow_addr);
+		kunmap_atomic(kaddr);
+	}
+	__free_page(shadow_page);
[...]
+	return ret;
+}
+
+static int
+bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
+			  unsigned int dirty_offset, unsigned int dirty_len,
+			  bool full_page)
+{
+	int ret = 0;
+	unsigned int start, end;
+
+	if (full_page) {
+		start = 0;
+		end = PAGE_CACHE_SIZE;
+	} else {
+		start = round_down(dirty_offset, SECTOR_SIZE);
+		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
+	}
+
+	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
+	if (!be) {
+		zero_user_segments(page, start, dirty_offset,
+				   dirty_offset + dirty_len, end);
+		if (start == 0 && end == PAGE_CACHE_SIZE &&
+		    trylock_page(page)) {
+			SetPageUptodate(page);
+			unlock_page(page);
+		}
+		return ret;
+	}
+
+	if (start != dirty_offset)
+		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
+
+	if (!ret && (dirty_offset + dirty_len < end))
+		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
+					  end - dirty_offset - dirty_len);
+
+	return ret;
+}
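
/*
 * Editor's aside: the boundary math bl_read_partial_page_sync() relies on.
 * The dirty byte range is widened to 512-byte sector boundaries, and only
 * the head (start..dirty_offset) and tail (dirty_end..end) need to be read
 * from disk. A userspace sketch with hypothetical demo_ names:
 */
#include <assert.h>

#define DEMO_SECTOR_SIZE 512u

static unsigned int demo_round_down(unsigned int x, unsigned int a)
{
	return x & ~(a - 1);            /* a must be a power of two */
}

static unsigned int demo_round_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int dirty_offset = 700, dirty_len = 100;
	unsigned int start = demo_round_down(dirty_offset, DEMO_SECTOR_SIZE);
	unsigned int end = demo_round_up(dirty_offset + dirty_len,
					 DEMO_SECTOR_SIZE);

	/* Bytes 700..799 dirty: sectors covering 512..1024 are involved. */
	assert(start == 512 && end == 1024);
	/* Head to read in: 512..700; tail to read in: 800..1024. */
	assert(dirty_offset - start == 188);
	assert(end - dirty_offset - dirty_len == 224);
	return 0;
}
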
 /* Given an unmapped page, zero it or read in page for COW, page is locked
[...]
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
-	struct parallel_io *par;
+	struct parallel_io *par = NULL;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
+	unsigned int pg_offset, pg_len, saved_len;
	struct page **pages = wdata->args.pages;
	struct page *page;
[...]
		NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

+	if (header->dreq != NULL &&
+	    (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
+	     !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
+		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
+		goto out_mds;
+	}
+
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
			extent_length = be->be_length -
				(isect - be->be_f_offset);
		}
-		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+
+		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
+		pg_offset = offset & ~PAGE_CACHE_MASK;
+		if (pg_offset + count > PAGE_CACHE_SIZE)
+			pg_len = PAGE_CACHE_SIZE - pg_offset;
+		else
+			pg_len = count;
+
+		saved_len = pg_len;
+		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
+		    !bl_is_sector_init(be->be_inval, isect)) {
+			ret = bl_read_partial_page_sync(pages[i], cow_read,
+							pg_offset, pg_len, true);
+			if (ret) {
+				dprintk("%s bl_read_partial_page_sync fail %d\n",
+					__func__, ret);
+				header->pnfs_error = ret;
+				goto out;
+			}
+
			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
[...]
				header->pnfs_error = ret;
[...]
+			}
+
+			/* Expand to full page write */
+			pg_offset = 0;
+			pg_len = PAGE_CACHE_SIZE;
+		} else if ((pg_offset & (SECTOR_SIZE - 1)) ||
+			   (pg_len & (SECTOR_SIZE - 1))) {
+			/* ahh, nasty case. We have to do sync full sector
+			 * read-modify-write cycles.
+			 */
+			unsigned int saved_offset = pg_offset;
+			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
+							pg_len, false);
+			pg_offset = round_down(pg_offset, SECTOR_SIZE);
+			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
+				 - pg_offset;
+		}
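
/*
 * Editor's aside: the widening in the "nasty case" above. Once the sync
 * read has filled in the partial head and tail sectors, the in-page write
 * range is expanded to sector boundaries so the bio only touches whole
 * 512-byte sectors. A sketch, not part of the patch (demo_ constants are
 * hypothetical):
 */
#include <assert.h>

#define DEMO_SECTOR_SIZE 512u

int main(void)
{
	unsigned int pg_offset = 700, pg_len = 100;
	unsigned int saved_offset = pg_offset;

	/* round_down(pg_offset, SECTOR_SIZE) */
	pg_offset = pg_offset & ~(DEMO_SECTOR_SIZE - 1);
	/* round_up(saved_offset + pg_len, SECTOR_SIZE) - pg_offset */
	pg_len = ((saved_offset + pg_len + DEMO_SECTOR_SIZE - 1)
		  & ~(DEMO_SECTOR_SIZE - 1)) - pg_offset;

	/* Dirty bytes 700..799 become one sector-aligned 512..1023 write. */
	assert(pg_offset == 512 && pg_len == 512);
	return 0;
}
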
-		bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
+		bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
					 isect, pages[i], be,
-					 bl_end_io_write, par);
+					 bl_end_io_write, par,
+					 pg_offset, pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
[...]
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
+static bool
+is_aligned_req(struct nfs_page *req, unsigned int alignment)
+{
+	return IS_ALIGNED(req->wb_offset, alignment) &&
+	       IS_ALIGNED(req->wb_bytes, alignment);
+}
+
+static void
+bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+	if (pgio->pg_dreq != NULL &&
+	    !is_aligned_req(req, SECTOR_SIZE))
+		nfs_pageio_reset_read_mds(pgio);
+	else
+		pnfs_generic_pg_init_read(pgio, req);
+}
+
+static bool
+bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+		struct nfs_page *req)
+{
+	if (pgio->pg_dreq != NULL &&
+	    !is_aligned_req(req, SECTOR_SIZE))
+		return false;
+
+	return pnfs_generic_pg_test(pgio, prev, req);
+}
+
+static void
+bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+	if (pgio->pg_dreq != NULL &&
+	    !is_aligned_req(req, PAGE_CACHE_SIZE))
+		nfs_pageio_reset_write_mds(pgio);
+	else
+		pnfs_generic_pg_init_write(pgio, req);
+}
+
+static bool
+bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+		 struct nfs_page *req)
+{
+	if (pgio->pg_dreq != NULL &&
+	    !is_aligned_req(req, PAGE_CACHE_SIZE))
+		return false;
+
+	return pnfs_generic_pg_test(pgio, prev, req);
+}
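
/*
 * Editor's aside: the gate these pg_init/pg_test hooks implement. For
 * direct I/O (pg_dreq set), reads must be 512-byte aligned and writes
 * page aligned; anything else is bounced back to the MDS path. A
 * userspace sketch of the same IS_ALIGNED test (demo_ names and the
 * 4096-byte page size are assumptions):
 */
#include <assert.h>
#include <stdbool.h>

static bool demo_is_aligned(unsigned long v, unsigned long a)
{
	return (v & (a - 1)) == 0;      /* a must be a power of two */
}

static bool demo_req_ok(unsigned long offset, unsigned long bytes,
			unsigned long alignment)
{
	/* Both the request offset and its length must be aligned,
	 * mirroring is_aligned_req(). */
	return demo_is_aligned(offset, alignment) &&
	       demo_is_aligned(bytes, alignment);
}

int main(void)
{
	/* Sector-aligned read: handled by the block layout driver. */
	assert(demo_req_ok(1024, 512, 512));
	/* A 100-byte read ends mid-sector: reset to MDS I/O instead. */
	assert(!demo_req_ok(1024, 100, 512));
	/* Writes need full page alignment. */
	assert(!demo_req_ok(4096, 512, 4096));
	return 0;
}
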
 static const struct nfs_pageio_ops bl_pg_read_ops = {
-	.pg_init = pnfs_generic_pg_init_read,
-	.pg_test = pnfs_generic_pg_test,
+	.pg_init = bl_pg_init_read,
+	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
 };

 static const struct nfs_pageio_ops bl_pg_write_ops = {
-	.pg_init = pnfs_generic_pg_init_write,
-	.pg_test = pnfs_generic_pg_test,
+	.pg_init = bl_pg_init_write,
+	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
 };