| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-03 10:35:43 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-03 10:35:43 -0700 |
| commit | f00654007fe1c154dafbdc1f5953c132e8c27c38 (patch) | |
| tree | 69dd4b82b1a1be18b789ef3010b13e4834c8e353 /fs | |
| parent | e087437a6fef3acc11aaa1ade84731fe1571b808 (diff) | |
| parent | cf5e7a652168fba45410ac6f5b363fcf8677dea5 (diff) | |
Merge tag 'folio-6.0' of git://git.infradead.org/users/willy/pagecache
Pull folio updates from Matthew Wilcox:
- Fix an accounting bug that made NR_FILE_DIRTY grow without limit
when running xfstests
- Convert more of mpage to use folios
- Remove add_to_page_cache() and add_to_page_cache_locked()
- Convert find_get_pages_range() to filemap_get_folios()
- Improvements to the read_cache_page() family of functions
- Remove a few unnecessary checks of PageError
- Some straightforward filesystem conversions to use folios
- Split PageMovable users out from address_space_operations into
their own movable_operations
- Convert aops->migratepage to aops->migrate_folio (a sketch of this
conversion pattern follows the list below)
- Remove nobh support (Christoph Hellwig)
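To make the migrate_folio conversion concrete, here is a minimal sketch of the new pattern; the aio and btrfs hunks in the diff below are real instances of it. Everything named `examplefs` is hypothetical and not part of this merge; `filemap_migrate_folio()` and the `.migrate_folio` aops field are the interfaces this series adds:

```c
#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>

#ifdef CONFIG_MIGRATION
/*
 * filemap_migrate_folio() (added by this series) moves the page cache
 * entry, transfers any private data, and copies the folio contents, so
 * a filesystem with no special migration needs is a one-liner.
 */
static int examplefs_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return filemap_migrate_folio(mapping, dst, src, mode);
}
#else
/*
 * Defining the helper to NULL when migration is disabled keeps the
 * aops initializer below free of #ifdefs, matching the pattern the
 * series applies to aio, btrfs and others.
 */
#define examplefs_migrate_folio NULL
#endif

static const struct address_space_operations examplefs_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.migrate_folio	= examplefs_migrate_folio,
};
```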
* tag 'folio-6.0' of git://git.infradead.org/users/willy/pagecache: (78 commits)
fs: remove the NULL get_block case in mpage_writepages
fs: don't call ->writepage from __mpage_writepage
fs: remove the nobh helpers
jfs: stop using the nobh helper
ext2: remove nobh support
ntfs3: refactor ntfs_writepages
mm/folio-compat: Remove migration compatibility functions
fs: Remove aops->migratepage()
secretmem: Convert to migrate_folio
hugetlb: Convert to migrate_folio
aio: Convert to migrate_folio
f2fs: Convert to filemap_migrate_folio()
ubifs: Convert to filemap_migrate_folio()
btrfs: Convert btrfs_migratepage to migrate_folio
mm/migrate: Add filemap_migrate_folio()
mm/migrate: Convert migrate_page() to migrate_folio()
nfs: Convert to migrate_folio
btrfs: Convert btree_migratepage to migrate_folio
mm/migrate: Convert expected_page_refs() to folio_expected_refs()
mm/migrate: Convert buffer_migrate_page() to buffer_migrate_folio()
...
Diffstat (limited to 'fs')
54 files changed, 350 insertions, 985 deletions
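A recurring pattern in the diff below is the find_get_pages_range() to filemap_get_folios() conversion; the clean_bdev_aliases() hunk in fs/buffer.c is the real instance. As a rough, hypothetical sketch of the new iteration idiom (`examplefs_walk_range()` is an invented function, not anything in this merge):

```c
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/*
 * Hypothetical sketch: visit every cached folio in [start, end].
 * filemap_get_folios() fills the batch, takes a reference on each
 * folio it returns, and advances *start past the last one, so the
 * loop terminates once the range is exhausted.
 */
static void examplefs_walk_range(struct address_space *mapping,
				 pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... per-folio work would go here ... */
			folio_unlock(folio);
		}
		/* Drop the references the lookup took. */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
```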
```diff
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index bbb2c210d139..97f50e9fd9eb 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -132,12 +132,6 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
         if (IS_ERR(page))
                 return PTR_ERR(page);
 
-        if (PageError(page)) {
-                ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt);
-                put_page(page);
-                return ret;
-        }
-
         buf = kmap(page);
         ret = -EINVAL;
         if (buf[size - 1] == '.')
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-                        struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+                        struct folio *src, enum migrate_mode mode)
 {
         struct kioctx *ctx;
         unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                 goto out;
         }
 
-        idx = old->index;
+        idx = src->index;
         if (idx < (pgoff_t)ctx->nr_pages) {
-                /* Make sure the old page hasn't already been changed */
-                if (ctx->ring_pages[idx] != old)
+                /* Make sure the old folio hasn't already been changed */
+                if (ctx->ring_pages[idx] != &src->page)
                         rc = -EAGAIN;
         } else
                 rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                 goto out_unlock;
 
         /* Writeback must be complete */
-        BUG_ON(PageWriteback(old));
-        get_page(new);
+        BUG_ON(folio_test_writeback(src));
+        folio_get(dst);
 
-        rc = migrate_page_move_mapping(mapping, new, old, 1);
+        rc = folio_migrate_mapping(mapping, dst, src, 1);
         if (rc != MIGRATEPAGE_SUCCESS) {
-                put_page(new);
+                folio_put(dst);
                 goto out_unlock;
         }
 
         /* Take completion_lock to prevent other writes to the ring buffer
-         * while the old page is copied to the new. This prevents new
+         * while the old folio is copied to the new. This prevents new
          * events from being lost.
          */
         spin_lock_irqsave(&ctx->completion_lock, flags);
-        migrate_page_copy(new, old);
-        BUG_ON(ctx->ring_pages[idx] != old);
-        ctx->ring_pages[idx] = new;
+        folio_migrate_copy(dst, src);
+        BUG_ON(ctx->ring_pages[idx] != &src->page);
+        ctx->ring_pages[idx] = &dst->page;
         spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-        /* The old page is no longer accessible. */
-        put_page(old);
+        /* The old folio is no longer accessible. */
+        folio_put(src);
 
 out_unlock:
         mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ out:
         spin_unlock(&mapping->private_lock);
         return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
         .dirty_folio    = noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-        .migratepage    = aio_migratepage,
-#endif
+        .migrate_folio  = aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index be383fa46b12..32749fcee090 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -108,8 +108,7 @@ static const struct export_operations befs_export_operations = {
  * passes it the address of befs_get_block, for mapping file
  * positions to disk blocks.
  */
-static int
-befs_read_folio(struct file *file, struct folio *folio)
+static int befs_read_folio(struct file *file, struct folio *folio)
 {
         return block_read_full_folio(folio, befs_get_block);
 }
@@ -470,13 +469,12 @@ befs_destroy_inodecache(void)
  */
 static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
 {
-        struct page *page = &folio->page;
-        struct inode *inode = page->mapping->host;
+        struct inode *inode = folio->mapping->host;
         struct super_block *sb = inode->i_sb;
         struct befs_inode_info *befs_ino = BEFS_I(inode);
         befs_data_stream *data = &befs_ino->i_data.ds;
         befs_off_t len = data->size;
-        char *link = page_address(page);
+        char *link = folio_address(folio);
 
         if (len == 0 || len > PAGE_SIZE) {
                 befs_error(sb, "Long symlink with illegal length");
@@ -489,12 +487,12 @@ static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
                 goto fail;
         }
         link[len - 1] = '\0';
-        SetPageUptodate(page);
-        unlock_page(page);
+        folio_mark_uptodate(folio);
+        folio_unlock(folio);
         return 0;
 
 fail:
-        SetPageError(page);
-        unlock_page(page);
+        folio_set_error(folio);
+        folio_unlock(folio);
         return -EIO;
 }
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index de440ebf5648..1b23d806a647 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -953,28 +953,28 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_num)
 }
 
 #ifdef CONFIG_MIGRATION
-static int btree_migratepage(struct address_space *mapping,
-                        struct page *newpage, struct page *page,
-                        enum migrate_mode mode)
+static int btree_migrate_folio(struct address_space *mapping,
+                struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
         /*
          * we can't safely write a btree page from here,
          * we haven't done the locking hook
          */
-        if (PageDirty(page))
+        if (folio_test_dirty(src))
                 return -EAGAIN;
         /*
          * Buffers may be managed in a filesystem specific way.
          * We must have no buffers or drop them.
          */
-        if (page_has_private(page) &&
-            !try_to_release_page(page, GFP_KERNEL))
+        if (folio_get_private(src) &&
+            !filemap_release_folio(src, GFP_KERNEL))
                 return -EAGAIN;
-        return migrate_page(mapping, newpage, page, mode);
+        return migrate_folio(mapping, dst, src, mode);
 }
+#else
+#define btree_migrate_folio NULL
 #endif
-
 
 static int btree_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
 {
@@ -1074,10 +1074,8 @@ static const struct address_space_operations btree_aops = {
         .writepages     = btree_writepages,
         .release_folio  = btree_release_folio,
         .invalidate_folio = btree_invalidate_folio,
-#ifdef CONFIG_MIGRATION
-        .migratepage    = btree_migratepage,
-#endif
-        .dirty_folio = btree_dirty_folio,
+        .migrate_folio  = btree_migrate_folio,
+        .dirty_folio    = btree_dirty_folio,
 };
 
 struct extent_buffer *btrfs_find_create_tree_block(
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 57915bf2111f..11a52db506b3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8268,30 +8268,24 @@ static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 }
 
 #ifdef CONFIG_MIGRATION
-static int btrfs_migratepage(struct address_space *mapping,
-                             struct page *newpage, struct page *page,
+static int btrfs_migrate_folio(struct address_space *mapping,
+                             struct folio *dst, struct folio *src,
                              enum migrate_mode mode)
 {
-        int ret;
+        int ret = filemap_migrate_folio(mapping, dst, src, mode);
 
-        ret = migrate_page_move_mapping(mapping, newpage, page, 0);
         if (ret != MIGRATEPAGE_SUCCESS)
                 return ret;
 
-        if (page_has_private(page))
-                attach_page_private(newpage, detach_page_private(page));
-
-        if (PageOrdered(page)) {
-                ClearPageOrdered(page);
-                SetPageOrdered(newpage);
+        if (folio_test_ordered(src)) {
+                folio_clear_ordered(src);
+                folio_set_ordered(dst);
         }
 
-        if (mode != MIGRATE_SYNC_NO_COPY)
-                migrate_page_copy(newpage, page);
-        else
-                migrate_page_states(newpage, page);
         return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define btrfs_migrate_folio NULL
 #endif
 
 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
@@ -11436,9 +11430,7 @@ static const struct address_space_operations btrfs_aops = {
         .direct_IO      = noop_direct_IO,
         .invalidate_folio = btrfs_invalidate_folio,
         .release_folio  = btrfs_release_folio,
-#ifdef CONFIG_MIGRATION
-        .migratepage    = btrfs_migratepage,
-#endif
+        .migrate_folio  = btrfs_migrate_folio,
         .dirty_folio    = filemap_dirty_folio,
         .error_remove_page = generic_error_remove_page,
         .swap_activate  = btrfs_swap_activate,
diff --git a/fs/buffer.c b/fs/buffer.c
index 82de136b83bb..55e762a58eb6 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -282,10 +282,10 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 
         /*
-         * If none of the buffers had errors and they are all
-         * uptodate then we can set the page uptodate.
+         * If all of the buffers are uptodate then we can set the page
+         * uptodate.
          */
-        if (page_uptodate && !PageError(page))
+        if (page_uptodate)
                 SetPageUptodate(page);
         unlock_page(page);
         return;
@@ -1604,7 +1604,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 {
         struct inode *bd_inode = bdev->bd_inode;
         struct address_space *bd_mapping = bd_inode->i_mapping;
-        struct pagevec pvec;
+        struct folio_batch fbatch;
         pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
         pgoff_t end;
         int i, count;
@@ -1612,24 +1612,24 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
         struct buffer_head *head;
 
         end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
-        pagevec_init(&pvec);
-        while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
-                count = pagevec_count(&pvec);
+        folio_batch_init(&fbatch);
+        while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
+                count = folio_batch_count(&fbatch);
                 for (i = 0; i < count; i++) {
-                        struct page *page = pvec.pages[i];
+                        struct folio *folio = fbatch.folios[i];
 
-                        if (!page_has_buffers(page))
+                        if (!folio_buffers(folio))
                                 continue;
                         /*
-                         * We use page lock instead of bd_mapping->private_lock
+                         * We use folio lock instead of bd_mapping->private_lock
                          * to pin buffers here since we can afford to sleep and
                          * it scales better than a global spinlock lock.
                          */
-                        lock_page(page);
-                        /* Recheck when the page is locked which pins bhs */
-                        if (!page_has_buffers(page))
+                        folio_lock(folio);
+                        /* Recheck when the folio is locked which pins bhs */
+                        head = folio_buffers(folio);
+                        if (!head)
                                 goto unlock_page;
-                        head = page_buffers(page);
                         bh = head;
                         do {
                                 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
@@ -1643,9 +1643,9 @@ next:
                         bh = bh->b_this_page;
                 } while (bh != head);
 unlock_page:
-                unlock_page(page);
+                folio_unlock(folio);
         }
-        pagevec_release(&pvec);
+        folio_batch_release(&fbatch);
         cond_resched();
         /* End of range already reached? */
         if (index > end || !index)
@@ -2259,6 +2259,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
         unsigned int blocksize, bbits;
         int nr, i;
         int fully_mapped = 1;
+        bool page_error = false;
 
         VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
 
@@ -2283,8 +2284,10 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
                 if (iblock < lblock) {
                         WARN_ON(bh->b_size != blocksize);
                         err = get_block(inode, iblock, bh, 0);
-                        if (err)
+                        if (err) {
                                 folio_set_error(folio);
+                                page_error = true;
+                        }
                 }
                 if (!buffer_mapped(bh)) {
                         folio_zero_range(folio, i * blocksize,
@@ -2311,7 +2314,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
                  * All buffers are uptodate - we can set the folio uptodate
                  * as well. But not if get_block() returned an error.
                  */
-                if (!folio_test_error(folio))
+                if (!page_error)
                         folio_mark_uptodate(folio);
                 folio_unlock(folio);
                 return 0;
@@ -2534,330 +2537,6 @@ out_unlock:
 }
 EXPORT_SYMBOL(block_page_mkwrite);
 
-/*
- * nobh_write_begin()'s prereads are special: the buffer_heads are freed
- * immediately, while under the page lock. So it needs a special end_io
- * handler which does not touch the bh after unlocking it.
- */
-static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
-{
-        __end_buffer_read_notouch(bh, uptodate);
-}
-
-/*
- * Attach the singly-linked list of buffers created by nobh_write_begin, to
- * the page (converting it to circular linked list and taking care of page
- * dirty races).
- */
-static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
-{
-        struct buffer_head *bh;
-
-        BUG_ON(!PageLocked(page));
-
-        spin_lock(&page->mapping->private_lock);
-        bh = head;
-        do {
-                if (PageDirty(page))
-                        set_buffer_dirty(bh);
-                if (!bh->b_this_page)
-                        bh->b_this_page = head;
-                bh = bh->b_this_page;
-        } while (bh != head);
-        attach_page_private(page, head);
-        spin_unlock(&page->mapping->private_lock);
-}
-
-/*
- * On entry, the page is fully not uptodate.
- * On exit the page is fully uptodate in the areas outside (from,to)
- * The filesystem needs to handle block truncation upon failure.
- */
-int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
-                        struct page **pagep, void **fsdata,
-                        get_block_t *get_block)
-{
-        struct inode *inode = mapping->host;
-        const unsigned blkbits = inode->i_blkbits;
-        const unsigned blocksize = 1 << blkbits;
-        struct buffer_head *head, *bh;
-        struct page *page;
-        pgoff_t index;
-        unsigned from, to;
-        unsigned block_in_page;
-        unsigned block_start, block_end;
-        sector_t block_in_file;
-        int nr_reads = 0;
-        int ret = 0;
-        int is_mapped_to_disk = 1;
-
-        index = pos >> PAGE_SHIFT;
-        from = pos & (PAGE_SIZE - 1);
-        to = from + len;
-
-        page = grab_cache_page_write_begin(mapping, index);
-        if (!page)
-                return -ENOMEM;
-        *pagep = page;
-        *fsdata = NULL;
-
-        if (page_has_buffers(page)) {
-                ret = __block_write_begin(page, pos, len, get_block);
-                if (unlikely(ret))
-                        goto out_release;
-                return ret;
-        }
-
-        if (PageMappedToDisk(page))
-                return 0;
-
-        /*
-         * Allocate buffers so that we can keep track of state, and potentially
-         * attach them to the page if an error occurs. In the common case of
-         * no error, they will just be freed again without ever being attached
-         * to the page (which is all OK, because we're under the page lock).
-         *
-         * Be careful: the buffer linked list is a NULL terminated one, rather
-         * than the circular one we're used to.
-         */
-        head = alloc_page_buffers(page, blocksize, false);
-        if (!head) {
-                ret = -ENOMEM;
-                goto out_release;
-        }
-
-        block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
-
-        /*
-         * We loop across all blocks in the page, whether or not they are
-         * part of the affected region. This is so we can discover if the
-         * page is fully mapped-to-disk.
-         */
-        for (block_start = 0, block_in_page = 0, bh = head;
-                  block_start < PAGE_SIZE;
-                  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
-                int create;
-
-                block_end = block_start + blocksize;
-                bh->b_state = 0;
-                create = 1;
-                if (block_start >= to)
-                        create = 0;
-                ret = get_block(inode, block_in_file + block_in_page,
-                                        bh, create);
-                if (ret)
-                        goto failed;
-                if (!buffer_mapped(bh))
-                        is_mapped_to_disk = 0;
-                if (buffer_new(bh))
-                        clean_bdev_bh_alias(bh);
-                if (PageUptodate(page)) {
-                        set_buffer_uptodate(bh);
-                        continue;
-                }
-                if (buffer_new(bh) || !buffer_mapped(bh)) {
-                        zero_user_segments(page, block_start, from,
-                                                        to, block_end);
-                        continue;
-                }
-                if (buffer_uptodate(bh))
-                        continue;       /* reiserfs does this */
-                if (block_start < from || block_end > to) {
-                        lock_buffer(bh);
-                        bh->b_end_io = end_buffer_read_nobh;
-                        submit_bh(REQ_OP_READ, bh);
-                        nr_reads++;
-                }
-        }
-
-        if (nr_reads) {
-                /*
-                 * The page is locked, so these buffers are protected from
-                 * any VM or truncate activity. Hence we don't need to care
-                 * for the buffer_head refcounts.
-                 */
-                for (bh = head; bh; bh = bh->b_this_page) {
-                        wait_on_buffer(bh);
-                        if (!buffer_uptodate(bh))
-                                ret = -EIO;
-                }
-                if (ret)
-                        goto failed;
-        }
-
-        if (is_mapped_to_disk)
-                SetPageMappedToDisk(page);
-
-        *fsdata = head; /* to be released by nobh_write_end */
-
-        return 0;
-
-failed:
-        BUG_ON(!ret);
-        /*
-         * Error recovery is a bit difficult. We need to zero out blocks that
-         * were newly allocated, and dirty them to ensure they get written out.
-         * Buffers need to be attached to the page at this point, otherwise
-         * the handling of potential IO errors during writeout would be hard
-         * (could try doing synchronous writeout, but what if that fails too?)
-         */
-        attach_nobh_buffers(page, head);
-        page_zero_new_buffers(page, from, to);
-
-out_release:
-        unlock_page(page);
-        put_page(page);
-        *pagep = NULL;
-
-        return ret;
-}
-EXPORT_SYMBOL(nobh_write_begin);
-
-int nobh_write_end(struct file *file, struct address_space *mapping,
-                        loff_t pos, unsigned len, unsigned copied,
-                        struct page *page, void *fsdata)
-{
-        struct inode *inode = page->mapping->host;
-        struct buffer_head *head = fsdata;
-        struct buffer_head *bh;
-
-        BUG_ON(fsdata != NULL && page_has_buffers(page));
-
-        if (unlikely(copied < len) && head)
-                attach_nobh_buffers(page, head);
-        if (page_has_buffers(page))
-                return generic_write_end(file, mapping, pos, len,
-                                        copied, page, fsdata);
-
-        SetPageUptodate(page);
-        set_page_dirty(page);
-        if (pos+copied > inode->i_size) {
-                i_size_write(inode, pos+copied);
-                mark_inode_dirty(inode);
-        }
-
-        unlock_page(page);
-        put_page(page);
-
-        while (head) {
-                bh = head;
-                head = head->b_this_page;
-                free_buffer_head(bh);
-        }
-
-        return copied;
-}
-EXPORT_SYMBOL(nobh_write_end);
-
-/*
- * nobh_writepage() - based on block_full_write_page() except
- * that it tries to operate without attaching bufferheads to
- * the page.
- */
-int nobh_writepage(struct page *page, get_block_t *get_block,
-                        struct writeback_control *wbc)
-{
-        struct inode * const inode = page->mapping->host;
-        loff_t i_size = i_size_read(inode);
-        const pgoff_t end_index = i_size >> PAGE_SHIFT;
-        unsigned offset;
-        int ret;
-
-        /* Is the page fully inside i_size? */
-        if (page->index < end_index)
-                goto out;
-
-        /* Is the page fully outside i_size? (truncate in progress) */
-        offset = i_size & (PAGE_SIZE-1);
-        if (page->index >= end_index+1 || !offset) {
-                unlock_page(page);
-                return 0; /* don't care */
-        }
-
-        /*
-         * The page straddles i_size. It must be zeroed out on each and every
-         * writepage invocation because it may be mmapped. "A file is mapped
-         * in multiples of the page size. For a file that is not a multiple of
-         * the page size, the remaining memory is zeroed when mapped, and
-         * writes to that region are not written out to the file."
-         */
-        zero_user_segment(page, offset, PAGE_SIZE);
-out:
-        ret = mpage_writepage(page, get_block, wbc);
-        if (ret == -EAGAIN)
-                ret = __block_write_full_page(inode, page, get_block, wbc,
-                                              end_buffer_async_write);
-        return ret;
-}
-EXPORT_SYMBOL(nobh_writepage);
-
-int nobh_truncate_page(struct address_space *mapping,
-                        loff_t from, get_block_t *get_block)
-{
-        pgoff_t index = from >> PAGE_SHIFT;
-        struct inode *inode = mapping->host;
-        unsigned blocksize = i_blocksize(inode);
-        struct folio *folio;
-        struct buffer_head map_bh;
-        size_t offset;
-        sector_t iblock;
-        int err;
-
-        /* Block boundary? Nothing to do */
-        if (!(from & (blocksize - 1)))
-                return 0;
-
-        folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
-                        mapping_gfp_mask(mapping));
-        err = -ENOMEM;
-        if (!folio)
-                goto out;
-
-        if (folio_buffers(folio))
-                goto has_buffers;
-
-        iblock = from >> inode->i_blkbits;
-        map_bh.b_size = blocksize;
-        map_bh.b_state = 0;
-        err = get_block(inode, iblock, &map_bh, 0);
-        if (err)
-                goto unlock;
-        /* unmapped? It's a ho
```
