author     Linus Torvalds <torvalds@linux-foundation.org>  2023-09-23 09:56:40 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-09-23 09:56:40 -0700
commit     59c376d636387a9b896c8da1d1bb68bb0bda67c0 (patch)
tree       875ef35172a5fb93be3bbbdfc708c6c53966fc5c /fs/buffer.c
parent     d90b0276af8f25a0b8ae081a30d1b2a61263393b (diff)
parent     a5f31a5028d1e88e97c3b6cdc3e3bf2da085e232 (diff)
Merge tag 'iomap-6.6-fixes-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull iomap fixes from Darrick Wong:

 - Return EIO on bad inputs to iomap_to_bh instead of BUGging, to deal
   less poorly with block device io racing with block device resizing

 - Fix a stale page data exposure bug introduced in 6.6-rc1 when
   unsharing a file range that is not in the page cache

* tag 'iomap-6.6-fixes-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  iomap: convert iomap_unshare_iter to use large folios
  iomap: don't skip reading in !uptodate folios when unsharing a range
  iomap: handle error conditions more gracefully in iomap_to_bh
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c | 25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 2379564e5aea..a6785cd07081 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2011,7 +2011,7 @@ void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
}
EXPORT_SYMBOL(folio_zero_new_buffers);
-static void
+static int
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
const struct iomap *iomap)
{
@@ -2025,7 +2025,8 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
* current block, then do not map the buffer and let the caller
* handle it.
*/
- BUG_ON(offset >= iomap->offset + iomap->length);
+ if (offset >= iomap->offset + iomap->length)
+ return -EIO;
switch (iomap->type) {
case IOMAP_HOLE:
@@ -2037,7 +2038,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
set_buffer_new(bh);
- break;
+ return 0;
case IOMAP_DELALLOC:
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
@@ -2045,7 +2046,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
set_buffer_uptodate(bh);
set_buffer_mapped(bh);
set_buffer_delay(bh);
- break;
+ return 0;
case IOMAP_UNWRITTEN:
/*
* For unwritten regions, we always need to ensure that regions
@@ -2062,7 +2063,10 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
inode->i_blkbits;
set_buffer_mapped(bh);
- break;
+ return 0;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
}
}
@@ -2103,13 +2107,12 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
- if (get_block) {
+ if (get_block)
err = get_block(inode, block, bh, 1);
- if (err)
- break;
- } else {
- iomap_to_bh(inode, block, bh, iomap);
- }
+ else
+ err = iomap_to_bh(inode, block, bh, iomap);
+ if (err)
+ break;
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);