summary | refs | log | tree | commit | diff
path: root/block
diff options
context:
space:
mode:
author    Linus Torvalds <torvalds@linux-foundation.org> 2025-06-06 13:12:50 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2025-06-06 13:12:50 -0700
commit    6d8854216ebb60959ddb6f4ea4123bd449ba6cf6 (patch)
tree      065c7ffc29952ae309fcb815fbd4f81423655292 /block
parent    794a54920781162c4503acea62d88e725726e319 (diff)
parent    6f65947a1e684db28b9407ea51927ed5157caf41 (diff)
download  linux-6d8854216ebb60959ddb6f4ea4123bd449ba6cf6.tar.gz
          linux-6d8854216ebb60959ddb6f4ea4123bd449ba6cf6.tar.bz2
          linux-6d8854216ebb60959ddb6f4ea4123bd449ba6cf6.zip
Merge tag 'block-6.16-20250606' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:

 - NVMe pull request via Christoph:
     - TCP error handling fix (Shin'ichiro Kawasaki)
     - TCP I/O stall handling fixes (Hannes Reinecke)
     - fix command limits status code (Keith Busch)
     - support vectored buffers also for passthrough (Pavel Begunkov)
     - spelling fixes (Yi Zhang)

 - MD pull request via Yu:
     - fix REQ_RAHEAD and REQ_NOWAIT IO err handling for raid1/10
     - fix max_write_behind setting for dm-raid
     - some minor cleanups

 - Integrity data direction fix and cleanup

 - bcache NULL pointer fix

 - Fix for loop missing write start/end handling

 - Decouple hardware queues and IO threads in ublk

 - Slew of ublk selftests additions and updates

* tag 'block-6.16-20250606' of git://git.kernel.dk/linux: (29 commits)
  nvme: spelling fixes
  nvme-tcp: fix I/O stalls on congested sockets
  nvme-tcp: sanitize request list handling
  nvme-tcp: remove tag set when second admin queue config fails
  nvme: enable vectored registered bufs for passthrough cmds
  nvme: fix implicit bool to flags conversion
  nvme: fix command limits status code
  selftests: ublk: kublk: improve behavior on init failure
  block: flip iter directions in blk_rq_integrity_map_user()
  block: drop direction param from bio_integrity_copy_user()
  selftests: ublk: cover PER_IO_DAEMON in more stress tests
  Documentation: ublk: document UBLK_F_PER_IO_DAEMON
  selftests: ublk: add stress test for per io daemons
  selftests: ublk: add functional test for per io daemons
  selftests: ublk: kublk: decouple ublk_queues from ublk server threads
  selftests: ublk: kublk: move per-thread data out of ublk_queue
  selftests: ublk: kublk: lift queue initialization out of thread
  selftests: ublk: kublk: tie sqe allocation to io instead of queue
  selftests: ublk: kublk: plumb q_id in io_uring user_data
  ublk: have a per-io daemon instead of a per-queue daemon
  ...
Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c  17
-rw-r--r--  block/blk-integrity.c   7
2 files changed, 6 insertions(+), 18 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index cb94e9be26dc..10912988c8f5 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -154,10 +154,9 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_integrity_add_page);
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
- int nr_vecs, unsigned int len,
- unsigned int direction)
+ int nr_vecs, unsigned int len)
{
- bool write = direction == ITER_SOURCE;
+ bool write = op_is_write(bio_op(bio));
struct bio_integrity_payload *bip;
struct iov_iter iter;
void *buf;
@@ -168,7 +167,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
return -ENOMEM;
if (write) {
- iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
+ iov_iter_bvec(&iter, ITER_SOURCE, bvec, nr_vecs, len);
if (!copy_from_iter_full(buf, len, &iter)) {
ret = -EFAULT;
goto free_buf;
@@ -264,7 +263,7 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
size_t offset, bytes = iter->count;
- unsigned int direction, nr_bvecs;
+ unsigned int nr_bvecs;
int ret, nr_vecs;
bool copy;
@@ -273,11 +272,6 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
return -E2BIG;
- if (bio_data_dir(bio) == READ)
- direction = ITER_DEST;
- else
- direction = ITER_SOURCE;
-
nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS + 1);
if (nr_vecs > BIO_MAX_VECS)
return -E2BIG;
@@ -300,8 +294,7 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
copy = true;
if (copy)
- ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
- direction);
+ ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes);
else
ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
if (ret)
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index a1678f0a9f81..e4e2567061f9 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -117,13 +117,8 @@ int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
{
int ret;
struct iov_iter iter;
- unsigned int direction;
- if (op_is_write(req_op(rq)))
- direction = ITER_DEST;
- else
- direction = ITER_SOURCE;
- iov_iter_ubuf(&iter, direction, ubuf, bytes);
+ iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
ret = bio_integrity_map_user(rq->bio, &iter);
if (ret)
return ret;