Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git, synced 2026-04-18 03:23:53 -04:00.
block: rework bio splitting
The current setup with bio_may_exceed_limit and __bio_split_to_limits is a bit of a mess. Change it so that __bio_split_to_limits does all the work and is just a variant of bio_split_to_limits that returns nr_segs. This is done by inlining it and instead having the various bio_split_* helpers directly submit the potentially split bios. To support btrfs, the rw version has a lower-level helper split out that just returns the offset to split at. This turns out to nicely clean up the btrfs flow as well. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: David Sterba <dsterba@suse.com> Reviewed-by: Damien Le Moal <dlemoal@kernel.org> Tested-by: Hans Holmberg <hans.holmberg@wdc.com> Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com> Link: https://lore.kernel.org/r/20240826173820.1690925-2-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
committed by
Jens Axboe
parent
f6f84be089
commit
b35243a447
69
block/blk.h
69
block/blk.h
@@ -331,33 +331,58 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
|
||||
ssize_t part_timeout_store(struct device *, struct device_attribute *,
|
||||
const char *, size_t);
|
||||
|
||||
static inline bool bio_may_exceed_limits(struct bio *bio,
|
||||
const struct queue_limits *lim)
|
||||
{
|
||||
switch (bio_op(bio)) {
|
||||
case REQ_OP_DISCARD:
|
||||
case REQ_OP_SECURE_ERASE:
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
return true; /* non-trivial splitting decisions */
|
||||
default:
|
||||
break;
|
||||
}
|
||||
/*
 * Per-operation bio splitters.  Each splits off a head bio that fits @lim,
 * stores that bio's segment count in the out-parameter, and returns the head.
 */
struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
|
||||
|
||||
/*
|
||||
* All drivers must accept single-segments bios that are <= PAGE_SIZE.
|
||||
* This is a quick and dirty check that relies on the fact that
|
||||
* bi_io_vec[0] is always valid if a bio has data. The check might
|
||||
* lead to occasional false negatives when bios are cloned, but compared
|
||||
* to the performance impact of cloned bios themselves the loop below
|
||||
* doesn't matter anyway.
|
||||
*/
|
||||
/*
|
||||
* All drivers must accept single-segments bios that are smaller than PAGE_SIZE.
|
||||
*
|
||||
* This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
|
||||
* always valid if a bio has data. The check might lead to occasional false
|
||||
* positives when bios are cloned, but compared to the performance impact of
|
||||
* cloned bios themselves the loop below doesn't matter anyway.
|
||||
*/
|
||||
static inline bool bio_may_need_split(struct bio *bio,
|
||||
const struct queue_limits *lim)
|
||||
{
|
||||
return lim->chunk_sectors || bio->bi_vcnt != 1 ||
|
||||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
|
||||
}
|
||||
|
||||
struct bio *__bio_split_to_limits(struct bio *bio,
|
||||
const struct queue_limits *lim,
|
||||
unsigned int *nr_segs);
|
||||
/**
|
||||
* __bio_split_to_limits - split a bio to fit the queue limits
|
||||
* @bio: bio to be split
|
||||
* @lim: queue limits to split based on
|
||||
* @nr_segs: returns the number of segments in the returned bio
|
||||
*
|
||||
* Check if @bio needs splitting based on the queue limits, and if so split off
|
||||
* a bio fitting the limits from the beginning of @bio and return it. @bio is
|
||||
* shortened to the remainder and re-submitted.
|
||||
*
|
||||
* The split bio is allocated from @q->bio_split, which is provided by the
|
||||
* block layer.
|
||||
*/
|
||||
static inline struct bio *__bio_split_to_limits(struct bio *bio,
|
||||
const struct queue_limits *lim, unsigned int *nr_segs)
|
||||
{
|
||||
switch (bio_op(bio)) {
|
||||
default:
|
||||
if (bio_may_need_split(bio, lim))
|
||||
return bio_split_rw(bio, lim, nr_segs);
|
||||
*nr_segs = 1;
|
||||
return bio;
|
||||
case REQ_OP_DISCARD:
|
||||
case REQ_OP_SECURE_ERASE:
|
||||
return bio_split_discard(bio, lim, nr_segs);
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
return bio_split_write_zeroes(bio, lim, nr_segs);
|
||||
}
|
||||
}
|
||||
|
||||
int ll_back_merge_fn(struct request *req, struct bio *bio,
|
||||
unsigned int nr_segs);
|
||||
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
|
||||
|
||||
Reference in New Issue
Block a user