diff options
| author | John Garry <john.g.garry@oracle.com> | 2025-09-15 10:34:58 +0000 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2025-10-15 12:03:34 +0200 |
| commit | 8f192ff4d501c439e1d85f9d1904233868f82ad4 (patch) | |
| tree | 24dd2c214c40bab6d466f6a7d9c2b8b13ea02236 /block | |
| parent | 3ef5e106a3441616e75565840fbf0858a3d78104 (diff) | |
| download | linux-8f192ff4d501c439e1d85f9d1904233868f82ad4.tar.gz linux-8f192ff4d501c439e1d85f9d1904233868f82ad4.tar.bz2 linux-8f192ff4d501c439e1d85f9d1904233868f82ad4.zip | |
block: update validation of atomic writes boundary for stacked devices
[ Upstream commit bfd4037296bd7e1f95394a2e3daf8e3c1796c3b3 ]
In commit 63d092d1c1b1 ("block: use chunk_sectors when evaluating stacked
atomic write limits"), the chunk sectors limit check was missed in
blk_stack_atomic_writes_boundary_head(), so update that function to
do the proper check.
Fixes: 63d092d1c1b1 ("block: use chunk_sectors when evaluating stacked atomic write limits")
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-settings.c | 22 |
1 file changed, 14 insertions, 8 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c index 693bc8d20acf..6760dbf130b2 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -643,18 +643,24 @@ static bool blk_stack_atomic_writes_tail(struct queue_limits *t, static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t, struct queue_limits *b) { + unsigned int boundary_sectors; + + if (!b->atomic_write_hw_boundary || !t->chunk_sectors) + return true; + + boundary_sectors = b->atomic_write_hw_boundary >> SECTOR_SHIFT; + /* * Ensure atomic write boundary is aligned with chunk sectors. Stacked - * devices store chunk sectors in t->io_min. + * devices store any stripe size in t->chunk_sectors. */ - if (b->atomic_write_hw_boundary > t->io_min && - b->atomic_write_hw_boundary % t->io_min) + if (boundary_sectors > t->chunk_sectors && + boundary_sectors % t->chunk_sectors) return false; - if (t->io_min > b->atomic_write_hw_boundary && - t->io_min % b->atomic_write_hw_boundary) + if (t->chunk_sectors > boundary_sectors && + t->chunk_sectors % boundary_sectors) return false; - t->atomic_write_hw_boundary = b->atomic_write_hw_boundary; return true; } @@ -695,13 +701,13 @@ static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t) static bool blk_stack_atomic_writes_head(struct queue_limits *t, struct queue_limits *b) { - if (b->atomic_write_hw_boundary && - !blk_stack_atomic_writes_boundary_head(t, b)) + if (!blk_stack_atomic_writes_boundary_head(t, b)) return false; t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max; t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min; t->atomic_write_hw_max = b->atomic_write_hw_max; + t->atomic_write_hw_boundary = b->atomic_write_hw_boundary; return true; } |
