| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2022-10-09 22:30:23 -0700 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2022-10-09 22:30:23 -0700 |
| commit | 5f8f8574c7f5585b09a9623f0f13462e4eb67b4d (patch) | |
| tree | 8f1d5e88bf9604a9e39fbcce0e37b3d8cee451bb /block/blk.h | |
| parent | e62563db857f81d75c5726a35bc0180bed6d1540 (diff) | |
| parent | fe5b6aaef72a0f7daa06e7960e0bee45c2984e41 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.1 merge window.
Diffstat (limited to 'block/blk.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | block/blk.h | 81 |

1 file changed, 44 insertions, 37 deletions
```diff
diff --git a/block/blk.h b/block/blk.h
index 434017701403..d7142c4d2fef 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -31,11 +31,6 @@ extern struct kmem_cache *blk_requestq_srcu_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
-static inline void __blk_get_queue(struct request_queue *q)
-{
-	kobject_get(&q->kobj);
-}
-
 bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
@@ -102,23 +97,23 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 	return true;
 }
 
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
+static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
 		struct bio_vec *bprv, unsigned int offset)
 {
-	return (offset & queue_virt_boundary(q)) ||
-		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+	return (offset & lim->virt_boundary_mask) ||
+		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
 }
 
 /*
  * Check if adding a bio_vec after bprv with offset would create a gap in
  * the SG list. Most drivers don't care about this, but some do.
  */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
+static inline bool bvec_gap_to_prev(struct queue_limits *lim,
 		struct bio_vec *bprv, unsigned int offset)
 {
-	if (!queue_virt_boundary(q))
+	if (!lim->virt_boundary_mask)
 		return false;
-	return __bvec_gap_to_prev(q, bprv, offset);
+	return __bvec_gap_to_prev(lim, bprv, offset);
 }
 
 static inline bool rq_mergeable(struct request *rq)
@@ -159,6 +154,19 @@ static inline bool blk_discard_mergable(struct request *req)
 	return false;
 }
 
+static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
+						     enum req_op op)
+{
+	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
+		return min(q->limits.max_discard_sectors,
+			   UINT_MAX >> SECTOR_SHIFT);
+
+	if (unlikely(op == REQ_OP_WRITE_ZEROES))
+		return q->limits.max_write_zeroes_sectors;
+
+	return q->limits.max_sectors;
+}
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 void blk_flush_integrity(void);
 bool __bio_integrity_endio(struct bio *);
@@ -181,7 +189,8 @@ static inline bool integrity_req_gap_back_merge(struct request *req,
 	struct bio_integrity_payload *bip = bio_integrity(req->bio);
 	struct bio_integrity_payload *bip_next = bio_integrity(next);
 
-	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+	return bvec_gap_to_prev(&req->q->limits,
+				&bip->bip_vec[bip->bip_vcnt - 1],
 				bip_next->bip_vec[0].bv_offset);
 }
 
@@ -191,7 +200,8 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
 
-	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+	return bvec_gap_to_prev(&req->q->limits,
+				&bip->bip_vec[bip->bip_vcnt - 1],
 				bip_next->bip_vec[0].bv_offset);
 }
 
@@ -280,7 +290,8 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
 ssize_t part_timeout_store(struct device *, struct device_attribute *,
 				const char *, size_t);
 
-static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
+static inline bool bio_may_exceed_limits(struct bio *bio,
+					 struct queue_limits *lim)
 {
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
@@ -299,12 +310,12 @@ static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
 	 * to the performance impact of cloned bios themselves the loop below
 	 * doesn't matter anyway.
 	 */
-	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
+	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
 		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
 }
 
-void __blk_queue_split(struct request_queue *q, struct bio **bio,
-		unsigned int *nr_segs);
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
+		unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
@@ -337,16 +348,6 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 }
 
 /*
- * The max size one bio can handle is UINT_MAX becasue bvec_iter.bi_size
- * is defined as 'unsigned int', meantime it has to aligned to with logical
- * block size which is the minimum accepted unit by hardware.
- */
-static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
-{
-	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
-}
-
-/*
  * Internal io_context interface
  */
 struct io_cq *ioc_find_get_icq(struct request_queue *q);
@@ -370,7 +371,7 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
-void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
+struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
 
 static inline bool blk_queue_may_bounce(struct request_queue *q)
 {
@@ -379,10 +380,12 @@ static inline bool blk_queue_may_bounce(struct request_queue *q)
 		max_low_pfn >= max_pfn;
 }
 
-static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
+static inline struct bio *blk_queue_bounce(struct bio *bio,
+		struct request_queue *q)
 {
-	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
-		__blk_queue_bounce(q, bio);
+	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
+		return __blk_queue_bounce(bio, q);
+	return bio;
 }
 
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
@@ -392,11 +395,11 @@ static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
 #endif
 
 #ifdef CONFIG_BLK_DEV_ZONED
-void blk_queue_free_zone_bitmaps(struct request_queue *q);
-void blk_queue_clear_zone_settings(struct request_queue *q);
+void disk_free_zone_bitmaps(struct gendisk *disk);
+void disk_clear_zone_settings(struct gendisk *disk);
 #else
-static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
-static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
+static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
+static inline void disk_clear_zone_settings(struct gendisk *disk) {}
 #endif
 
 int blk_alloc_ext_minor(void);
@@ -411,6 +414,9 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
 		sector_t length);
 void blk_drop_partitions(struct gendisk *disk);
 
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+		struct lock_class_key *lkclass);
+
 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
 		unsigned int max_sectors, bool *same_page);
@@ -436,13 +442,14 @@ extern struct device_attribute dev_attr_events;
 extern struct device_attribute dev_attr_events_async;
 extern struct device_attribute dev_attr_events_poll_msecs;
 
+extern struct attribute_group blk_trace_attr_group;
+
 long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
 long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
 
 extern const struct address_space_operations def_blk_aops;
 
-int disk_register_independent_access_ranges(struct gendisk *disk,
-		struct blk_independent_access_ranges *new_iars);
+int disk_register_independent_access_ranges(struct gendisk *disk);
 void disk_unregister_independent_access_ranges(struct gendisk *disk);
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
```
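The common thread in the merge-path hunks above is that helpers which only consult device limits, such as `__bvec_gap_to_prev()` and `bvec_gap_to_prev()`, now take a `struct queue_limits *` directly instead of reaching through a `struct request_queue`. The sketch below is a minimal userspace rendition of that virt-boundary gap check, for illustration only: the `struct queue_limits` and `struct bio_vec` definitions are cut-down stand-ins, not the kernel's own types.

```c
/*
 * Userspace sketch of the virt-boundary gap check that this commit
 * switches from request_queue to queue_limits.  Struct definitions
 * here are simplified stand-ins for the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue_limits {
	unsigned long virt_boundary_mask;	/* e.g. 4095 for a 4K boundary */
};

struct bio_vec {
	unsigned int bv_offset;	/* segment offset within its page */
	unsigned int bv_len;	/* segment length in bytes */
};

/*
 * Mirrors the diff's bvec_gap_to_prev()/__bvec_gap_to_prev(): a gap
 * exists if the new segment does not start on the device's virtual
 * boundary, or the previous segment does not end on it.
 */
static bool bvec_gap_to_prev(const struct queue_limits *lim,
			     const struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;	/* device has no virt-boundary constraint */
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

int main(void)
{
	struct queue_limits lim = { .virt_boundary_mask = 4095 };
	struct bio_vec prev = { .bv_offset = 0, .bv_len = 4096 };

	/* prev ends exactly on the 4K boundary, next starts on one: no gap */
	printf("aligned:   gap=%d\n", bvec_gap_to_prev(&lim, &prev, 0));

	/* next segment starts 512 bytes into a page: gap */
	printf("unaligned: gap=%d\n", bvec_gap_to_prev(&lim, &prev, 512));
	return 0;
}
```

Callers that already hold a request, as in `integrity_req_gap_back_merge()` above, simply pass `&req->q->limits`; the benefit is that the split and merge paths can also run against a bare `queue_limits` with no queue in hand.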

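The new `blk_queue_get_max_sectors()` helper in the third hunk picks a per-operation transfer cap: discard-like operations use the discard limit clamped so the byte count still fits the 32-bit `bvec_iter.bi_size`, write-zeroes has its own limit, and everything else falls back to `max_sectors`. Below is a hedged userspace sketch of that selection logic; `enum req_op` and `struct queue_limits` are again simplified stand-ins, and `min()` is open-coded since the kernel macro is not available here.

```c
/* Userspace sketch of the selection logic in the new
 * blk_queue_get_max_sectors() helper; types are simplified stand-ins. */
#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD,
	      REQ_OP_SECURE_ERASE, REQ_OP_WRITE_ZEROES };

struct queue_limits {
	unsigned int max_sectors;
	unsigned int max_discard_sectors;
	unsigned int max_write_zeroes_sectors;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int queue_get_max_sectors(const struct queue_limits *lim,
					  enum req_op op)
{
	/* Clamp discards so sectors << SECTOR_SHIFT never overflows u32. */
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		return min_u(lim->max_discard_sectors,
			     UINT_MAX >> SECTOR_SHIFT);
	if (op == REQ_OP_WRITE_ZEROES)
		return lim->max_write_zeroes_sectors;
	return lim->max_sectors;
}

int main(void)
{
	struct queue_limits lim = {
		.max_sectors = 2560,		 /* 1280 KiB per request */
		.max_discard_sectors = UINT_MAX, /* "unlimited" discard */
		.max_write_zeroes_sectors = 0,	 /* not supported */
	};

	printf("write:        %u sectors\n",
	       queue_get_max_sectors(&lim, REQ_OP_WRITE));
	printf("discard:      %u sectors\n",	/* clamped to 8388607 */
	       queue_get_max_sectors(&lim, REQ_OP_DISCARD));
	printf("write-zeroes: %u sectors\n",
	       queue_get_max_sectors(&lim, REQ_OP_WRITE_ZEROES));
	return 0;
}
```

The same value-passing style shows up in the bounce hunks: instead of updating a `struct bio **` in place, `blk_queue_bounce()` now returns the (possibly bounced) bio, so callers write `bio = blk_queue_bounce(bio, q);` and the common no-bounce case is a plain pass-through.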