xen: remove 'XenBlkDev' and 'blkdev' names from dataplane/xen-block
This is a purely cosmetic patch that substitutes the old 'struct XenBlkDev'
name with 'XenBlockDataPlane' and 'blkdev' field/variable names with
'dataplane', and then does necessary fix-up to adhere to coding style.

No functional change.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Anthony Perard <anthony.perard@citrix.com>
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
parent fcab2b464e
commit f3b604e31d
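
The substance of the patch is a mechanical rename, visible most compactly in the header change at the bottom of the diff: the public typedef now points at a struct of the same name instead of the legacy 'struct XenBlkDev'. Below is a minimal standalone sketch (illustrative only, not QEMU code; the struct fields are invented for the example) of the opaque-typedef pattern that makes such a rename cheap: callers only ever hold a XenBlockDataPlane pointer, so renaming the struct touches one typedef plus the implementation file.

#include <stdio.h>
#include <stdlib.h>

/* What the header exposes: an incomplete ("opaque") type. Callers can
 * hold pointers to it but cannot touch its fields. */
typedef struct XenBlockDataPlane XenBlockDataPlane;

/* What the implementation file keeps private. Renaming this struct
 * (e.g. from an old 'struct XenBlkDev') cannot break callers, because
 * they never see the definition. Fields are invented for this sketch. */
struct XenBlockDataPlane {
    unsigned int requests_inflight;
    unsigned int max_requests;
};

static XenBlockDataPlane *dataplane_create(unsigned int max_requests)
{
    XenBlockDataPlane *dataplane = calloc(1, sizeof(*dataplane));

    dataplane->max_requests = max_requests;
    return dataplane;
}

int main(void)
{
    XenBlockDataPlane *dataplane = dataplane_create(32);

    printf("max_requests = %u\n", dataplane->max_requests);
    free(dataplane);
    return 0;
}

With that pattern in place, the rest of the diff below is pure substitution inside the one file that can see the struct layout.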
@@ -38,12 +38,12 @@ struct ioreq {
     int presync;
     int aio_inflight;
     int aio_errors;
-    struct XenBlkDev *blkdev;
+    XenBlockDataPlane *dataplane;
     QLIST_ENTRY(ioreq) list;
     BlockAcctCookie acct;
 };
 
-struct XenBlkDev {
+struct XenBlockDataPlane {
     XenDevice *xendev;
     XenEventChannel *event_channel;
     unsigned int *ring_ref;
@@ -79,33 +79,33 @@ static void ioreq_reset(struct ioreq *ioreq)
     ioreq->aio_inflight = 0;
     ioreq->aio_errors = 0;
 
-    ioreq->blkdev = NULL;
+    ioreq->dataplane = NULL;
     memset(&ioreq->list, 0, sizeof(ioreq->list));
     memset(&ioreq->acct, 0, sizeof(ioreq->acct));
 
     qemu_iovec_reset(&ioreq->v);
 }
 
-static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
+static struct ioreq *ioreq_start(XenBlockDataPlane *dataplane)
 {
     struct ioreq *ioreq = NULL;
 
-    if (QLIST_EMPTY(&blkdev->freelist)) {
-        if (blkdev->requests_total >= blkdev->max_requests) {
+    if (QLIST_EMPTY(&dataplane->freelist)) {
+        if (dataplane->requests_total >= dataplane->max_requests) {
             goto out;
         }
         /* allocate new struct */
         ioreq = g_malloc0(sizeof(*ioreq));
-        ioreq->blkdev = blkdev;
-        blkdev->requests_total++;
+        ioreq->dataplane = dataplane;
+        dataplane->requests_total++;
         qemu_iovec_init(&ioreq->v, 1);
     } else {
         /* get one from freelist */
-        ioreq = QLIST_FIRST(&blkdev->freelist);
+        ioreq = QLIST_FIRST(&dataplane->freelist);
         QLIST_REMOVE(ioreq, list);
     }
-    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
-    blkdev->requests_inflight++;
+    QLIST_INSERT_HEAD(&dataplane->inflight, ioreq, list);
+    dataplane->requests_inflight++;
 
 out:
     return ioreq;
@@ -113,26 +113,26 @@ out:
 
 static void ioreq_finish(struct ioreq *ioreq)
 {
-    struct XenBlkDev *blkdev = ioreq->blkdev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
 
     QLIST_REMOVE(ioreq, list);
-    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
-    blkdev->requests_inflight--;
-    blkdev->requests_finished++;
+    QLIST_INSERT_HEAD(&dataplane->finished, ioreq, list);
+    dataplane->requests_inflight--;
+    dataplane->requests_finished++;
 }
 
 static void ioreq_release(struct ioreq *ioreq, bool finish)
 {
-    struct XenBlkDev *blkdev = ioreq->blkdev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
 
     QLIST_REMOVE(ioreq, list);
     ioreq_reset(ioreq);
-    ioreq->blkdev = blkdev;
-    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
+    ioreq->dataplane = dataplane;
+    QLIST_INSERT_HEAD(&dataplane->freelist, ioreq, list);
     if (finish) {
-        blkdev->requests_finished--;
+        dataplane->requests_finished--;
     } else {
-        blkdev->requests_inflight--;
+        dataplane->requests_inflight--;
     }
 }
 
@@ -142,7 +142,7 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
  */
 static int ioreq_parse(struct ioreq *ioreq)
 {
-    struct XenBlkDev *blkdev = ioreq->blkdev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
     size_t len;
     int i;
 
@@ -165,12 +165,12 @@ static int ioreq_parse(struct ioreq *ioreq)
     };
 
     if (ioreq->req.operation != BLKIF_OP_READ &&
-        blk_is_read_only(blkdev->blk)) {
+        blk_is_read_only(dataplane->blk)) {
         error_report("error: write req for ro device");
         goto err;
     }
 
-    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
+    ioreq->start = ioreq->req.sector_number * dataplane->file_blk;
     for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
             error_report("error: nr_segments too big");
@@ -180,16 +180,16 @@ static int ioreq_parse(struct ioreq *ioreq)
             error_report("error: first > last sector");
             goto err;
         }
-        if (ioreq->req.seg[i].last_sect * blkdev->file_blk >= XC_PAGE_SIZE) {
+        if (ioreq->req.seg[i].last_sect * dataplane->file_blk >= XC_PAGE_SIZE) {
             error_report("error: page crossing");
             goto err;
         }
 
         len = (ioreq->req.seg[i].last_sect -
-               ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
+               ioreq->req.seg[i].first_sect + 1) * dataplane->file_blk;
         ioreq->size += len;
     }
-    if (ioreq->start + ioreq->size > blkdev->file_size) {
+    if (ioreq->start + ioreq->size > dataplane->file_size) {
         error_report("error: access beyond end of file");
         goto err;
     }
@@ -202,11 +202,11 @@ err:
 
 static int ioreq_grant_copy(struct ioreq *ioreq)
 {
-    struct XenBlkDev *blkdev = ioreq->blkdev;
-    XenDevice *xendev = blkdev->xendev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
+    XenDevice *xendev = dataplane->xendev;
     XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int i, count;
-    int64_t file_blk = blkdev->file_blk;
+    int64_t file_blk = dataplane->file_blk;
     bool to_domain = (ioreq->req.operation == BLKIF_OP_READ);
     void *virt = ioreq->buf;
     Error *local_err = NULL;
@@ -251,9 +251,9 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
 static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
-    struct XenBlkDev *blkdev = ioreq->blkdev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
 
-    aio_context_acquire(blkdev->ctx);
+    aio_context_acquire(dataplane->ctx);
 
     if (ret != 0) {
         error_report("%s I/O error",
@@ -302,25 +302,25 @@ static void qemu_aio_complete(void *opaque, int ret)
         }
     case BLKIF_OP_READ:
         if (ioreq->status == BLKIF_RSP_OKAY) {
-            block_acct_done(blk_get_stats(blkdev->blk), &ioreq->acct);
+            block_acct_done(blk_get_stats(dataplane->blk), &ioreq->acct);
         } else {
-            block_acct_failed(blk_get_stats(blkdev->blk), &ioreq->acct);
+            block_acct_failed(blk_get_stats(dataplane->blk), &ioreq->acct);
         }
         break;
     case BLKIF_OP_DISCARD:
     default:
         break;
     }
-    qemu_bh_schedule(blkdev->bh);
+    qemu_bh_schedule(dataplane->bh);
 
 done:
-    aio_context_release(blkdev->ctx);
+    aio_context_release(dataplane->ctx);
 }
 
 static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                               uint64_t nr_sectors)
 {
-    struct XenBlkDev *blkdev = ioreq->blkdev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
     int64_t byte_offset;
     int byte_chunk;
     uint64_t byte_remaining, limit;
@@ -329,18 +329,18 @@ static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
 
     /* Wrap around, or overflowing byte limit? */
     if (sec_start + sec_count < sec_count ||
-        sec_start + sec_count > INT64_MAX / blkdev->file_blk) {
+        sec_start + sec_count > INT64_MAX / dataplane->file_blk) {
         return false;
     }
 
-    limit = BDRV_REQUEST_MAX_SECTORS * blkdev->file_blk;
-    byte_offset = sec_start * blkdev->file_blk;
-    byte_remaining = sec_count * blkdev->file_blk;
+    limit = BDRV_REQUEST_MAX_SECTORS * dataplane->file_blk;
+    byte_offset = sec_start * dataplane->file_blk;
+    byte_remaining = sec_count * dataplane->file_blk;
 
     do {
         byte_chunk = byte_remaining > limit ? limit : byte_remaining;
         ioreq->aio_inflight++;
-        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
+        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                          qemu_aio_complete, ioreq);
         byte_remaining -= byte_chunk;
         byte_offset += byte_chunk;
@@ -351,7 +351,7 @@ static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
 
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
-    struct XenBlkDev *blkdev = ioreq->blkdev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
 
     ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
     if (ioreq->req.nr_segments &&
@@ -364,17 +364,17 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 
     ioreq->aio_inflight++;
     if (ioreq->presync) {
-        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
+        blk_aio_flush(ioreq->dataplane->blk, qemu_aio_complete, ioreq);
         return 0;
     }
 
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
         qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
-        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
+        block_acct_start(blk_get_stats(dataplane->blk), &ioreq->acct,
                          ioreq->v.size, BLOCK_ACCT_READ);
         ioreq->aio_inflight++;
-        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
+        blk_aio_preadv(dataplane->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
         break;
     case BLKIF_OP_WRITE:
@@ -384,12 +384,12 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         }
 
         qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
-        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
+        block_acct_start(blk_get_stats(dataplane->blk), &ioreq->acct,
                          ioreq->v.size,
                          ioreq->req.operation == BLKIF_OP_WRITE ?
                          BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
         ioreq->aio_inflight++;
-        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
+        blk_aio_pwritev(dataplane->blk, ioreq->start, &ioreq->v, 0,
                         qemu_aio_complete, ioreq);
         break;
     case BLKIF_OP_DISCARD:
@@ -417,27 +417,27 @@ err:
 
 static int blk_send_response_one(struct ioreq *ioreq)
 {
-    struct XenBlkDev *blkdev = ioreq->blkdev;
+    XenBlockDataPlane *dataplane = ioreq->dataplane;
     int send_notify = 0;
     int have_requests = 0;
     blkif_response_t *resp;
 
     /* Place on the response ring for the relevant domain. */
-    switch (blkdev->protocol) {
+    switch (dataplane->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
         resp = (blkif_response_t *)RING_GET_RESPONSE(
-            &blkdev->rings.native,
-            blkdev->rings.native.rsp_prod_pvt);
+            &dataplane->rings.native,
+            dataplane->rings.native.rsp_prod_pvt);
         break;
     case BLKIF_PROTOCOL_X86_32:
         resp = (blkif_response_t *)RING_GET_RESPONSE(
-            &blkdev->rings.x86_32_part,
-            blkdev->rings.x86_32_part.rsp_prod_pvt);
+            &dataplane->rings.x86_32_part,
+            dataplane->rings.x86_32_part.rsp_prod_pvt);
         break;
     case BLKIF_PROTOCOL_X86_64:
         resp = (blkif_response_t *)RING_GET_RESPONSE(
-            &blkdev->rings.x86_64_part,
-            blkdev->rings.x86_64_part.rsp_prod_pvt);
+            &dataplane->rings.x86_64_part,
+            dataplane->rings.x86_64_part.rsp_prod_pvt);
         break;
     default:
         return 0;
@@ -447,42 +447,45 @@ static int blk_send_response_one(struct ioreq *ioreq)
     resp->operation = ioreq->req.operation;
     resp->status = ioreq->status;
 
-    blkdev->rings.common.rsp_prod_pvt++;
+    dataplane->rings.common.rsp_prod_pvt++;
 
-    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
-    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
+    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
+                                         send_notify);
+    if (dataplane->rings.common.rsp_prod_pvt ==
+        dataplane->rings.common.req_cons) {
         /*
          * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
-        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
-    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
+        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
+                                      have_requests);
+    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
         have_requests = 1;
     }
 
     if (have_requests) {
-        blkdev->more_work++;
+        dataplane->more_work++;
     }
     return send_notify;
 }
 
 /* walk finished list, send outstanding responses, free requests */
-static void blk_send_response_all(struct XenBlkDev *blkdev)
+static void blk_send_response_all(XenBlockDataPlane *dataplane)
 {
     struct ioreq *ioreq;
     int send_notify = 0;
 
-    while (!QLIST_EMPTY(&blkdev->finished)) {
-        ioreq = QLIST_FIRST(&blkdev->finished);
+    while (!QLIST_EMPTY(&dataplane->finished)) {
+        ioreq = QLIST_FIRST(&dataplane->finished);
         send_notify += blk_send_response_one(ioreq);
         ioreq_release(ioreq, true);
     }
     if (send_notify) {
         Error *local_err = NULL;
 
-        xen_device_notify_event_channel(blkdev->xendev,
-                                        blkdev->event_channel,
+        xen_device_notify_event_channel(dataplane->xendev,
+                                        dataplane->event_channel,
                                         &local_err);
         if (local_err) {
             error_report_err(local_err);
@@ -490,67 +493,76 @@ static void blk_send_response_all(struct XenBlkDev *blkdev)
     }
 }
 
-static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq,
+static int blk_get_request(XenBlockDataPlane *dataplane, struct ioreq *ioreq,
                            RING_IDX rc)
 {
-    switch (blkdev->protocol) {
-    case BLKIF_PROTOCOL_NATIVE:
-        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
-               sizeof(ioreq->req));
+    switch (dataplane->protocol) {
+    case BLKIF_PROTOCOL_NATIVE: {
+        blkif_request_t *req =
+            RING_GET_REQUEST(&dataplane->rings.native, rc);
+
+        memcpy(&ioreq->req, req, sizeof(ioreq->req));
         break;
-    case BLKIF_PROTOCOL_X86_32:
-        blkif_get_x86_32_req(&ioreq->req,
-                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
+    }
+    case BLKIF_PROTOCOL_X86_32: {
+        blkif_x86_32_request_t *req =
+            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);
+
+        blkif_get_x86_32_req(&ioreq->req, req);
         break;
-    case BLKIF_PROTOCOL_X86_64:
-        blkif_get_x86_64_req(&ioreq->req,
-                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
+    }
+    case BLKIF_PROTOCOL_X86_64: {
+        blkif_x86_64_request_t *req =
+            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);
+
+        blkif_get_x86_64_req(&ioreq->req, req);
         break;
     }
+    }
     /* Prevent the compiler from accessing the on-ring fields instead. */
     barrier();
     return 0;
 }
 
-static void blk_handle_requests(struct XenBlkDev *blkdev)
+static void blk_handle_requests(XenBlockDataPlane *dataplane)
 {
     RING_IDX rc, rp;
     struct ioreq *ioreq;
 
-    blkdev->more_work = 0;
+    dataplane->more_work = 0;
 
-    rc = blkdev->rings.common.req_cons;
-    rp = blkdev->rings.common.sring->req_prod;
+    rc = dataplane->rings.common.req_cons;
+    rp = dataplane->rings.common.sring->req_prod;
     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    blk_send_response_all(blkdev);
+    blk_send_response_all(dataplane);
     while (rc != rp) {
         /* pull request from ring */
-        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
+        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
             break;
         }
-        ioreq = ioreq_start(blkdev);
+        ioreq = ioreq_start(dataplane);
         if (ioreq == NULL) {
-            blkdev->more_work++;
+            dataplane->more_work++;
             break;
         }
-        blk_get_request(blkdev, ioreq, rc);
-        blkdev->rings.common.req_cons = ++rc;
+        blk_get_request(dataplane, ioreq, rc);
+        dataplane->rings.common.req_cons = ++rc;
 
         /* parse them */
         if (ioreq_parse(ioreq) != 0) {
 
             switch (ioreq->req.operation) {
             case BLKIF_OP_READ:
-                block_acct_invalid(blk_get_stats(blkdev->blk),
+                block_acct_invalid(blk_get_stats(dataplane->blk),
                                    BLOCK_ACCT_READ);
                 break;
             case BLKIF_OP_WRITE:
-                block_acct_invalid(blk_get_stats(blkdev->blk),
+                block_acct_invalid(blk_get_stats(dataplane->blk),
                                    BLOCK_ACCT_WRITE);
                 break;
             case BLKIF_OP_FLUSH_DISKCACHE:
-                block_acct_invalid(blk_get_stats(blkdev->blk),
+                block_acct_invalid(blk_get_stats(dataplane->blk),
                                    BLOCK_ACCT_FLUSH);
             default:
                 break;
@@ -559,8 +571,8 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
         if (blk_send_response_one(ioreq)) {
             Error *local_err = NULL;
 
-            xen_device_notify_event_channel(blkdev->xendev,
-                                            blkdev->event_channel,
+            xen_device_notify_event_channel(dataplane->xendev,
+                                            dataplane->event_channel,
                                             &local_err);
             if (local_err) {
                 error_report_err(local_err);
@@ -573,173 +585,173 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
         ioreq_runio_qemu_aio(ioreq);
     }
 
-    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
-        qemu_bh_schedule(blkdev->bh);
+    if (dataplane->more_work &&
+        dataplane->requests_inflight < dataplane->max_requests) {
+        qemu_bh_schedule(dataplane->bh);
     }
 }
 
 static void blk_bh(void *opaque)
 {
-    struct XenBlkDev *blkdev = opaque;
+    XenBlockDataPlane *dataplane = opaque;
 
-    aio_context_acquire(blkdev->ctx);
-    blk_handle_requests(blkdev);
-    aio_context_release(blkdev->ctx);
+    aio_context_acquire(dataplane->ctx);
+    blk_handle_requests(dataplane);
+    aio_context_release(dataplane->ctx);
 }
 
 static void blk_event(void *opaque)
 {
-    struct XenBlkDev *blkdev = opaque;
+    XenBlockDataPlane *dataplane = opaque;
 
-    qemu_bh_schedule(blkdev->bh);
+    qemu_bh_schedule(dataplane->bh);
 }
 
-struct XenBlkDev *xen_block_dataplane_create(XenDevice *xendev,
-                                             BlockConf *conf,
-                                             IOThread *iothread)
+XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
+                                              BlockConf *conf,
+                                              IOThread *iothread)
 {
-    struct XenBlkDev *blkdev = g_new0(struct XenBlkDev, 1);
+    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);
 
-    blkdev->xendev = xendev;
-    blkdev->file_blk = conf->logical_block_size;
-    blkdev->blk = conf->blk;
-    blkdev->file_size = blk_getlength(blkdev->blk);
+    dataplane->xendev = xendev;
+    dataplane->file_blk = conf->logical_block_size;
+    dataplane->blk = conf->blk;
+    dataplane->file_size = blk_getlength(dataplane->blk);
 
-    QLIST_INIT(&blkdev->inflight);
-    QLIST_INIT(&blkdev->finished);
-    QLIST_INIT(&blkdev->freelist);
+    QLIST_INIT(&dataplane->inflight);
+    QLIST_INIT(&dataplane->finished);
+    QLIST_INIT(&dataplane->freelist);
 
     if (iothread) {
-        blkdev->iothread = iothread;
-        object_ref(OBJECT(blkdev->iothread));
-        blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
+        dataplane->iothread = iothread;
+        object_ref(OBJECT(dataplane->iothread));
+        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
     } else {
-        blkdev->ctx = qemu_get_aio_context();
+        dataplane->ctx = qemu_get_aio_context();
     }
-    blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);
+    dataplane->bh = aio_bh_new(dataplane->ctx, blk_bh, dataplane);
 
-    return blkdev;
+    return dataplane;
 }
 
-void xen_block_dataplane_destroy(struct XenBlkDev *blkdev)
+void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
 {
     struct ioreq *ioreq;
 
-    if (!blkdev) {
+    if (!dataplane) {
         return;
     }
 
-    while (!QLIST_EMPTY(&blkdev->freelist)) {
-        ioreq = QLIST_FIRST(&blkdev->freelist);
+    while (!QLIST_EMPTY(&dataplane->freelist)) {
+        ioreq = QLIST_FIRST(&dataplane->freelist);
         QLIST_REMOVE(ioreq, list);
         qemu_iovec_destroy(&ioreq->v);
         g_free(ioreq);
     }
 
-    qemu_bh_delete(blkdev->bh);
-    if (blkdev->iothread) {
-        object_unref(OBJECT(blkdev->iothread));
+    qemu_bh_delete(dataplane->bh);
+    if (dataplane->iothread) {
+        object_unref(OBJECT(dataplane->iothread));
     }
 
-    g_free(blkdev);
+    g_free(dataplane);
 }
 
-
-void xen_block_dataplane_stop(struct XenBlkDev *blkdev)
+void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
 {
     XenDevice *xendev;
 
-    if (!blkdev) {
+    if (!dataplane) {
        return;
     }
 
-    aio_context_acquire(blkdev->ctx);
-    blk_set_aio_context(blkdev->blk, qemu_get_aio_context());
-    aio_context_release(blkdev->ctx);
+    aio_context_acquire(dataplane->ctx);
+    blk_set_aio_context(dataplane->blk, qemu_get_aio_context());
+    aio_context_release(dataplane->ctx);
 
-    xendev = blkdev->xendev;
+    xendev = dataplane->xendev;
 
-    if (blkdev->event_channel) {
+    if (dataplane->event_channel) {
         Error *local_err = NULL;
 
-        xen_device_unbind_event_channel(xendev, blkdev->event_channel,
+        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                         &local_err);
-        blkdev->event_channel = NULL;
+        dataplane->event_channel = NULL;
 
         if (local_err) {
             error_report_err(local_err);
        }
     }
 
-    if (blkdev->sring) {
+    if (dataplane->sring) {
         Error *local_err = NULL;
 
-        xen_device_unmap_grant_refs(xendev, blkdev->sring,
-                                    blkdev->nr_ring_ref, &local_err);
-        blkdev->sring = NULL;
+        xen_device_unmap_grant_refs(xendev, dataplane->sring,
+                                    dataplane->nr_ring_ref, &local_err);
+        dataplane->sring = NULL;
 
         if (local_err) {
             error_report_err(local_err);
        }
     }
 
-    g_free(blkdev->ring_ref);
-    blkdev->ring_ref = NULL;
+    g_free(dataplane->ring_ref);
+    dataplane->ring_ref = NULL;
 }
 
-void xen_block_dataplane_start(struct XenBlkDev *blkdev,
+void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                                const unsigned int ring_ref[],
                                unsigned int nr_ring_ref,
                                unsigned int event_channel,
                                unsigned int protocol,
                                Error **errp)
 {
-    XenDevice *xendev = blkdev->xendev;
+    XenDevice *xendev = dataplane->xendev;
     Error *local_err = NULL;
     unsigned int ring_size;
     unsigned int i;
 
-    blkdev->nr_ring_ref = nr_ring_ref;
-    blkdev->ring_ref = g_new(unsigned int, nr_ring_ref);
+    dataplane->nr_ring_ref = nr_ring_ref;
+    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);
 
     for (i = 0; i < nr_ring_ref; i++) {
-        blkdev->ring_ref[i] = ring_ref[i];
+        dataplane->ring_ref[i] = ring_ref[i];
     }
 
-    blkdev->protocol = protocol;
+    dataplane->protocol = protocol;
 
-    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
-    switch (blkdev->protocol) {
+    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
+    switch (dataplane->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
-        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
+        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
         break;
     }
     case BLKIF_PROTOCOL_X86_32:
     {
-        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
+        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
         break;
     }
     case BLKIF_PROTOCOL_X86_64:
     {
-        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
+        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
         break;
     }
     default:
-        error_setg(errp, "unknown protocol %u", blkdev->protocol);
+        error_setg(errp, "unknown protocol %u", dataplane->protocol);
         return;
     }
 
-    xen_device_set_max_grant_refs(xendev, blkdev->nr_ring_ref,
+    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                   &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         goto stop;
     }
 
-    blkdev->sring = xen_device_map_grant_refs(xendev,
-                                              blkdev->ring_ref,
-                                              blkdev->nr_ring_ref,
+    dataplane->sring = xen_device_map_grant_refs(xendev,
+                                                 dataplane->ring_ref,
+                                                 dataplane->nr_ring_ref,
                                                  PROT_READ | PROT_WRITE,
                                                  &local_err);
     if (local_err) {
@@ -747,46 +759,46 @@ void xen_block_dataplane_start(struct XenBlkDev *blkdev,
         goto stop;
     }
 
-    switch (blkdev->protocol) {
+    switch (dataplane->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
-        blkif_sring_t *sring_native = blkdev->sring;
+        blkif_sring_t *sring_native = dataplane->sring;
 
-        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
+        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
         break;
     }
     case BLKIF_PROTOCOL_X86_32:
     {
-        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
+        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;
 
-        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32,
+        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                        ring_size);
         break;
     }
     case BLKIF_PROTOCOL_X86_64:
     {
-        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
+        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;
 
-        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64,
+        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                        ring_size);
         break;
     }
     }
 
-    blkdev->event_channel =
+    dataplane->event_channel =
         xen_device_bind_event_channel(xendev, event_channel,
-                                      blk_event, blkdev,
+                                      blk_event, dataplane,
                                       &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         goto stop;
     }
 
-    aio_context_acquire(blkdev->ctx);
-    blk_set_aio_context(blkdev->blk, blkdev->ctx);
-    aio_context_release(blkdev->ctx);
+    aio_context_acquire(dataplane->ctx);
+    blk_set_aio_context(dataplane->blk, dataplane->ctx);
+    aio_context_release(dataplane->ctx);
     return;
 
 stop:
-    xen_block_dataplane_stop(blkdev);
+    xen_block_dataplane_stop(dataplane);
 }
@@ -12,7 +12,7 @@
 #include "hw/xen/xen-bus.h"
 #include "sysemu/iothread.h"
 
-typedef struct XenBlkDev XenBlockDataPlane;
+typedef struct XenBlockDataPlane XenBlockDataPlane;
 
 XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                               BlockConf *conf,