dm: introduce dm_accept_partial_bio

The function dm_accept_partial_bio allows the target to specify how many
sectors of the current bio it will process.  If the target only wants to
accept part of the bio, it calls dm_accept_partial_bio and the DM core
sends the rest of the data in a subsequent bio.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
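As a usage illustration (not part of this patch), a hypothetical target's
map routine could look like the sketch below; the target name, the
MY_CHUNK_SECTORS limit, and the my_target_ctx structure are invented for
this example, and the bio fields follow the kernel interface this patch
is written against:

	#include <linux/device-mapper.h>

	#define MY_CHUNK_SECTORS 8	/* hypothetical per-bio limit */

	struct my_target_ctx {		/* hypothetical per-target state */
		struct dm_dev *dev;
	};

	static int my_target_map(struct dm_target *ti, struct bio *bio)
	{
		struct my_target_ctx *ctx = ti->private;

		/*
		 * Accept at most MY_CHUNK_SECTORS sectors of this bio; the
		 * DM core sends the remainder in a new bio, which enters
		 * this map routine again.
		 */
		if (bio_sectors(bio) > MY_CHUNK_SECTORS)
			dm_accept_partial_bio(bio, MY_CHUNK_SECTORS);

		/* Remap the accepted part to the underlying device. */
		bio->bi_bdev = ctx->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}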
commit 1dd40c3ecd
parent e0d6609a5f
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1110,6 +1110,46 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
+/*
+ * A target may call dm_accept_partial_bio only from the map routine.  It is
+ * allowed for all bio types except REQ_FLUSH.
+ *
+ * dm_accept_partial_bio informs the dm that the target only wants to process
+ * additional n_sectors sectors of the bio and the rest of the data should be
+ * sent in a next bio.
+ *
+ * A diagram that explains the arithmetics:
+ * +--------------------+---------------+-------+
+ * |         1          |       2       |   3   |
+ * +--------------------+---------------+-------+
+ *
+ * <-------------- *tio->len_ptr --------------->
+ *                      <------- bi_size ------->
+ *                      <-- n_sectors -->
+ *
+ * Region 1 was already iterated over with bio_advance or similar function.
+ *	(it may be empty if the target doesn't use bio_advance)
+ * Region 2 is the remaining bio size that the target wants to process.
+ *	(it may be empty if region 1 is non-empty, although there is no reason
+ *	 to make it empty)
+ * The target requires that region 3 is to be sent in the next bio.
+ *
+ * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
+ * the partially processed part (the sum of regions 1+2) must be the same for all
+ * copies of the bio.
+ */
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+{
+	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+	BUG_ON(bio->bi_rw & REQ_FLUSH);
+	BUG_ON(bi_size > *tio->len_ptr);
+	BUG_ON(n_sectors > bi_size);
+	*tio->len_ptr -= bi_size - n_sectors;
+	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
 static void __map_bio(struct dm_target_io *tio)
 {
 	int r;
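As a concrete check of the arithmetic in the diagram above: suppose
*tio->len_ptr is 16 sectors and the target has already advanced over 4 of
them with bio_advance (region 1), so bi_size is 12; if the target then
accepts n_sectors = 8 (region 2), dm_accept_partial_bio subtracts
bi_size - n_sectors = 4 from *tio->len_ptr, leaving 12 (regions 1+2), and
shrinks bio->bi_iter.bi_size to 8 sectors, so the DM core later sends the
remaining 4 sectors (region 3) in a new bio.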
@@ -1200,11 +1240,13 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 
 static void __clone_and_map_simple_bio(struct clone_info *ci,
 				       struct dm_target *ti,
-				       unsigned target_bio_nr, unsigned len)
+				       unsigned target_bio_nr, unsigned *len)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
 	struct bio *clone = &tio->clone;
 
+	tio->len_ptr = len;
+
 	/*
 	 * Discard requests require the bio's inline iovecs be initialized.
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
@@ -1212,13 +1254,13 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
 	 */
 	__bio_clone_fast(clone, ci->bio);
 	if (len)
-		bio_setup_sector(clone, ci->sector, len);
+		bio_setup_sector(clone, ci->sector, *len);
 
 	__map_bio(tio);
 }
 
 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
-				  unsigned num_bios, unsigned len)
+				  unsigned num_bios, unsigned *len)
 {
 	unsigned target_bio_nr;
 
@@ -1233,13 +1275,13 @@ static int __send_empty_flush(struct clone_info *ci)
 
 	BUG_ON(bio_has_data(ci->bio));
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
+		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
 
 	return 0;
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-				     sector_t sector, unsigned len)
+				     sector_t sector, unsigned *len)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1254,7 +1296,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
 
 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
 		tio = alloc_tio(ci, ti, 0, target_bio_nr);
-		clone_bio(tio, bio, sector, len);
+		tio->len_ptr = len;
+		clone_bio(tio, bio, sector, *len);
 		__map_bio(tio);
 	}
 }
@@ -1306,7 +1349,7 @@ static int __send_changing_extent_only(struct clone_info *ci,
 		else
 			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
 
-		__send_duplicate_bios(ci, ti, num_bios, len);
+		__send_duplicate_bios(ci, ti, num_bios, &len);
 
 		ci->sector += len;
 	} while (ci->sector_count -= len);
@@ -1345,7 +1388,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 
 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-	__clone_and_map_data_bio(ci, ti, ci->sector, len);
+	__clone_and_map_data_bio(ci, ti, ci->sector, &len);
 
 	ci->sector += len;
 	ci->sector_count -= len;
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -291,6 +291,7 @@ struct dm_target_io {
 	struct dm_io *io;
 	struct dm_target *ti;
 	unsigned target_bio_nr;
+	unsigned *len_ptr;
 	struct bio clone;
 };
 
@@ -401,6 +402,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
 struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);