From 03396148bca54c0e81ad8eecb12a136456d14c16 Mon Sep 17 00:00:00 2001
From: Michael Tokarev
Date: Thu, 7 Jun 2012 20:17:55 +0400
Subject: [PATCH] allow qemu_iovec_from_buffer() to specify offset from which
 to start copying

Similar to

  qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                    int c, size_t bytes);

the new prototype is:

  qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
                      const void *buf, size_t bytes);

Processing starts at `offset' bytes within qiov.  This way, a bounce
buffer can be copied directly into the middle of a qiov.

This is exactly the same function as iov_from_buf() from iov.c, so
use the existing implementation and rename it to qemu_iovec_from_buf(),
which is shorter and matches the utility function.

As with the utility implementation, we now assert that the offset is
within the actual iovec.  Nothing changes for current callers, since
the `offset' parameter is new.

While at it, stop using a "bounce qiov" in block/qcow2.c and copy the
decrypted data directly from cluster_data instead of recreating a
temporary qiov for that.

Signed-off-by: Michael Tokarev
---
 block.c       |  6 +++---
 block/curl.c  |  6 +++---
 block/qcow.c  |  2 +-
 block/qcow2.c |  9 +++------
 block/rbd.c   |  2 +-
 cutils.c      | 16 +++-------------
 qemu-common.h |  3 ++-
 7 files changed, 16 insertions(+), 28 deletions(-)
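(Not part of the change itself -- just an illustration of the new offset
argument.  The copy_back() helper and its variable names below are made up
for this note; only qemu_iovec_from_buf() and qemu_iovec_memset() come from
the patch/header above.)

    #include "qemu-common.h"   /* qemu_iovec_from_buf(), qemu_iovec_memset() */

    /* Hypothetical caller: scatter a bounce buffer back into the middle of
     * a request's QEMUIOVector, without building a temporary qiov. */
    static void copy_back(QEMUIOVector *qiov, const uint8_t *bounce,
                          size_t skip, size_t len)
    {
        /* Copying starts at byte offset `skip' inside qiov; the offset is
         * asserted to lie within the vector, and the number of bytes
         * actually copied is returned (per iov_from_buf()). */
        size_t done = qemu_iovec_from_buf(qiov, skip, bounce, len);

        /* Zero-fill whatever part of the request the bounce buffer
         * did not cover. */
        qemu_iovec_memset(qiov, skip + done, 0, qiov->size - skip - done);
    }

In the patch itself the existing block.c/curl.c/qcow.c/rbd.c callers keep
passing offset 0; the qcow2 decrypt path is the first caller to use a
non-zero offset (bytes_done).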

diff --git a/block.c b/block.c
index 7547051ec2..e0ef95e094 100644
--- a/block.c
+++ b/block.c
@@ -1821,8 +1821,8 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
     }
 
     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
-    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
-                           nb_sectors * BDRV_SECTOR_SIZE);
+    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
+                        nb_sectors * BDRV_SECTOR_SIZE);
 
 err:
     qemu_vfree(bounce_buffer);
@@ -3382,7 +3382,7 @@ static void bdrv_aio_bh_cb(void *opaque)
     BlockDriverAIOCBSync *acb = opaque;
 
     if (!acb->is_write)
-        qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     qemu_vfree(acb->bounce);
     acb->common.cb(acb->common.opaque, acb->ret);
     qemu_bh_delete(acb->bh);
diff --git a/block/curl.c b/block/curl.c
index bf3680ba57..e7c3634d35 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -140,8 +140,8 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
             continue;
 
         if ((s->buf_off >= acb->end)) {
-            qemu_iovec_from_buffer(acb->qiov, s->orig_buf + acb->start,
-                                   acb->end - acb->start);
+            qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start,
+                                acb->end - acb->start);
             acb->common.cb(acb->common.opaque, 0);
             qemu_aio_release(acb);
             s->acb[i] = NULL;
@@ -176,7 +176,7 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
         {
             char *buf = state->orig_buf + (start - state->buf_start);
 
-            qemu_iovec_from_buffer(acb->qiov, buf, len);
+            qemu_iovec_from_buf(acb->qiov, 0, buf, len);
             acb->common.cb(acb->common.opaque, 0);
 
             return FIND_RET_OK;
diff --git a/block/qcow.c b/block/qcow.c
index 35dff497ae..728010319f 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -540,7 +540,7 @@ done:
     qemu_co_mutex_unlock(&s->lock);
 
     if (qiov->niov > 1) {
-        qemu_iovec_from_buffer(qiov, orig_buf, qiov->size);
+        qemu_iovec_from_buf(qiov, 0, orig_buf, qiov->size);
         qemu_vfree(orig_buf);
     }
 
diff --git a/block/qcow2.c b/block/qcow2.c
index fcbf95273b..ccc599b519 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -590,7 +590,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                 goto fail;
             }
 
-            qemu_iovec_from_buffer(&hd_qiov,
+            qemu_iovec_from_buf(&hd_qiov, 0,
                 s->cluster_cache + index_in_cluster * 512,
                 512 * cur_nr_sectors);
             break;
@@ -630,11 +630,8 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
             if (s->crypt_method) {
                 qcow2_encrypt_sectors(s, sector_num, cluster_data,
                     cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
-                qemu_iovec_reset(&hd_qiov);
-                qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
-                    cur_nr_sectors * 512);
-                qemu_iovec_from_buffer(&hd_qiov, cluster_data,
-                    512 * cur_nr_sectors);
+                qemu_iovec_from_buf(qiov, bytes_done,
+                    cluster_data, 512 * cur_nr_sectors);
             }
             break;
 
diff --git a/block/rbd.c b/block/rbd.c
index 1280d66d3c..8bb3252bc3 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -620,7 +620,7 @@ static void rbd_aio_bh_cb(void *opaque)
     RBDAIOCB *acb = opaque;
 
     if (acb->cmd == RBD_AIO_READ) {
-        qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
diff --git a/cutils.c b/cutils.c
index 0ddf4c7d74..b4dd844644 100644
--- a/cutils.c
+++ b/cutils.c
@@ -245,20 +245,10 @@ void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf)
     }
 }
 
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
+size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
+                           const void *buf, size_t bytes)
 {
-    const uint8_t *p = (const uint8_t *)buf;
-    size_t copy;
-    int i;
-
-    for (i = 0; i < qiov->niov && count; ++i) {
-        copy = count;
-        if (copy > qiov->iov[i].iov_len)
-            copy = qiov->iov[i].iov_len;
-        memcpy(qiov->iov[i].iov_base, p, copy);
-        p += copy;
-        count -= copy;
-    }
+    return iov_from_buf(qiov->iov, qiov->niov, offset, buf, bytes);
 }
 
 size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
diff --git a/qemu-common.h b/qemu-common.h
index e752d2b6c1..430ec15a44 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -346,7 +346,8 @@ void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size);
 void qemu_iovec_destroy(QEMUIOVector *qiov);
 void qemu_iovec_reset(QEMUIOVector *qiov);
 void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf);
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count);
+size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
+                           const void *buf, size_t bytes);
 size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                          int fillc, size_t bytes);