mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-05-02 18:17:50 -04:00
dm/core: Combine request operation type and flags
Improve kernel code uniformity by combining the request operation type and flags into a single variable. Change 'int rw' into 'enum req_op op' because the name 'op' is what is used in the block layer to hold a request type. Use the blk_opf_t and enum req_op types where appropriate to improve static type checking.

Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-24-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
committed by
Jens Axboe
parent
71f7113d20
commit
a3282b432f
@@ -293,7 +293,7 @@ static void km_dp_init(struct dpages *dp, void *data)
|
||||
/*-----------------------------------------------------------------
|
||||
* IO routines that accept a list of pages.
|
||||
*---------------------------------------------------------------*/
|
||||
static void do_region(int op, int op_flags, unsigned region,
|
||||
static void do_region(const blk_opf_t opf, unsigned region,
|
||||
struct dm_io_region *where, struct dpages *dp,
|
||||
struct io *io)
|
||||
{
|
||||
@@ -306,6 +306,7 @@ static void do_region(int op, int op_flags, unsigned region,
|
||||
struct request_queue *q = bdev_get_queue(where->bdev);
|
||||
sector_t num_sectors;
|
||||
unsigned int special_cmd_max_sectors;
|
||||
const enum req_op op = opf & REQ_OP_MASK;
|
||||
|
||||
/*
|
||||
* Reject unsupported discard and write same requests.
|
||||
@@ -339,8 +340,8 @@ static void do_region(int op, int op_flags, unsigned region,
|
||||
(PAGE_SIZE >> SECTOR_SHIFT)));
|
||||
}
|
||||
|
||||
bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
|
||||
GFP_NOIO, &io->client->bios);
|
||||
bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
|
||||
&io->client->bios);
|
||||
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
|
||||
bio->bi_end_io = endio;
|
||||
store_io_and_region_in_bio(bio, io, region);
|
||||
@@ -368,7 +369,7 @@ static void do_region(int op, int op_flags, unsigned region,
|
||||
} while (remaining);
|
||||
}
|
||||
|
||||
static void dispatch_io(int op, int op_flags, unsigned int num_regions,
|
||||
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
|
||||
struct dm_io_region *where, struct dpages *dp,
|
||||
struct io *io, int sync)
|
||||
{
|
||||
@@ -378,7 +379,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
|
||||
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
|
||||
|
||||
if (sync)
|
||||
op_flags |= REQ_SYNC;
|
||||
opf |= REQ_SYNC;
|
||||
|
||||
/*
|
||||
* For multiple regions we need to be careful to rewind
|
||||
@@ -386,8 +387,8 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
|
||||
*/
|
||||
for (i = 0; i < num_regions; i++) {
|
||||
*dp = old_pages;
|
||||
if (where[i].count || (op_flags & REQ_PREFLUSH))
|
||||
do_region(op, op_flags, i, where + i, dp, io);
|
||||
if (where[i].count || (opf & REQ_PREFLUSH))
|
||||
do_region(opf, i, where + i, dp, io);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -411,13 +412,13 @@ static void sync_io_complete(unsigned long error, void *context)
|
||||
}
|
||||
|
||||
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
|
||||
struct dm_io_region *where, int op, int op_flags,
|
||||
struct dpages *dp, unsigned long *error_bits)
|
||||
struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
|
||||
unsigned long *error_bits)
|
||||
{
|
||||
struct io *io;
|
||||
struct sync_io sio;
|
||||
|
||||
if (num_regions > 1 && !op_is_write(op)) {
|
||||
if (num_regions > 1 && !op_is_write(opf)) {
|
||||
WARN_ON(1);
|
||||
return -EIO;
|
||||
}
|
||||
@@ -434,7 +435,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
|
||||
io->vma_invalidate_address = dp->vma_invalidate_address;
|
||||
io->vma_invalidate_size = dp->vma_invalidate_size;
|
||||
|
||||
dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
|
||||
dispatch_io(opf, num_regions, where, dp, io, 1);
|
||||
|
||||
wait_for_completion_io(&sio.wait);
|
||||
|
||||
@@ -445,12 +446,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
|
||||
}
|
||||
|
||||
static int async_io(struct dm_io_client *client, unsigned int num_regions,
|
||||
struct dm_io_region *where, int op, int op_flags,
|
||||
struct dm_io_region *where, blk_opf_t opf,
|
||||
struct dpages *dp, io_notify_fn fn, void *context)
|
||||
{
|
||||
struct io *io;
|
||||
|
||||
if (num_regions > 1 && !op_is_write(op)) {
|
||||
if (num_regions > 1 && !op_is_write(opf)) {
|
||||
WARN_ON(1);
|
||||
fn(1, context);
|
||||
return -EIO;
|
||||
@@ -466,7 +467,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
|
||||
io->vma_invalidate_address = dp->vma_invalidate_address;
|
||||
io->vma_invalidate_size = dp->vma_invalidate_size;
|
||||
|
||||
dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
|
||||
dispatch_io(opf, num_regions, where, dp, io, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -519,13 +520,10 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
|
||||
|
||||
if (!io_req->notify.fn)
|
||||
return sync_io(io_req->client, num_regions, where,
|
||||
io_req->bi_opf & REQ_OP_MASK,
|
||||
io_req->bi_opf & ~REQ_OP_MASK, &dp,
|
||||
sync_error_bits);
|
||||
io_req->bi_opf, &dp, sync_error_bits);
|
||||
|
||||
return async_io(io_req->client, num_regions, where,
|
||||
io_req->bi_opf & REQ_OP_MASK,
|
||||
io_req->bi_opf & ~REQ_OP_MASK, &dp, io_req->notify.fn,
|
||||
io_req->bi_opf, &dp, io_req->notify.fn,
|
||||
io_req->notify.context);
|
||||
}
|
||||
EXPORT_SYMBOL(dm_io);
|
||||
|
||||
Reference in New Issue
Block a user