-----BEGIN PGP SIGNATURE-----

Version: GnuPG v1
 
 iQEcBAABAgAGBQJXVtmBAAoJEJykq7OBq3PI+eIH/3sx898eSWy9GcNddxvt9PwZ
 XB4R0gVG3dcGupaRFvMV3C6kLbx+5YykdZTU5heN7R3k3pBNVdPlbYhAy4KnNQqJ
 SPNTk2Y2yH4VtmjIJpl2bJsYbnxQN26gFaMWrs8UzFRVeDQjT0K0OyO5yHlVkDtE
 gOMA6zTQ94L0wj3g3pz8PPOGzJ/mA3MvMq+Af0h7d4iiwUqSGfZVxOQ7eS92vdmb
 VkSFOJSbckOb1k2IC9uwYpvaJR3KFp7RBY2joPYoD/kHv41oLRZ06ceHEuYWYGRK
 9aLiXsWIfXijKTynbbB0HeLcUKvHZok39zWl9d0zNtZ3q7Akuz94aWvbqHABG+Y=
 =1Q5r
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Tue 07 Jun 2016 15:26:09 BST
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request:
  throttle: refuse iops-size without iops-total/read/write
  block: Drop bdrv_ioctl_bh_cb
  block: Move BlockRequest type to io.c
  block/io: optimize bdrv_co_pwritev for small requests
  iostatus: fix comments for block_job_iostatus_reset
  block/io: Remove unused bdrv_aio_write_zeroes()
  virtio: drop duplicate virtio_queue_get_id() function
  virtio-scsi: Remove op blocker for dataplane
  virtio-blk: Remove op blocker for dataplane
  blockdev-backup: Don't move target AioContext if it's attached
  blockdev-backup: Use bdrv_lookup_bs on target
  tests: avoid coroutine pool test crash

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2016-06-07 15:59:28 +01:00
commit 40eeb397c8
15 changed files with 74 additions and 248 deletions

View File

@@ -1427,6 +1427,14 @@ int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
bytes += offset & (align - 1);
offset = offset & ~(align - 1);
/* We have read the tail already if the request is smaller
* than one aligned block.
*/
if (bytes < align) {
qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
bytes = align;
}
}
if ((offset + bytes) & (align - 1)) {
@@ -1865,17 +1873,6 @@ BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
cb, opaque, true);
}
BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
BDRV_REQ_ZERO_WRITE | flags,
cb, opaque, true);
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
qemu_aio_ref(acb);
@@ -1905,6 +1902,27 @@ void bdrv_aio_cancel_async(BlockAIOCB *acb)
/**************************************************************/
/* async block device emulation */
typedef struct BlockRequest {
union {
/* Used during read, write, trim */
struct {
int64_t sector;
int nb_sectors;
int flags;
QEMUIOVector *qiov;
};
/* Used during ioctl */
struct {
int req;
void *buf;
};
};
BlockCompletionFunc *cb;
void *opaque;
int error;
} BlockRequest;
typedef struct BlockAIOCBCoroutine {
BlockAIOCB common;
BlockRequest req;
@@ -2309,19 +2327,6 @@ int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
return rwco.ret;
}
typedef struct {
CoroutineIOCompletion *co;
QEMUBH *bh;
} BdrvIoctlCompletionData;
static void bdrv_ioctl_bh_cb(void *opaque)
{
BdrvIoctlCompletionData *data = opaque;
bdrv_co_io_em_complete(data->co, -ENOTSUP);
qemu_bh_delete(data->bh);
}
static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
BlockDriver *drv = bs->drv;
@@ -2339,11 +2344,8 @@ static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
if (!acb) {
BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
data->bh = aio_bh_new(bdrv_get_aio_context(bs),
bdrv_ioctl_bh_cb, data);
data->co = &co;
qemu_bh_schedule(data->bh);
co.ret = -ENOTSUP;
goto out;
}
qemu_coroutine_yield();
out:
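
The small-request optimization in the first hunk above hinges on the head/tail alignment arithmetic in bdrv_co_pwritev: once the request has been widened to start on an aligned boundary, a request that is still smaller than one aligned block is already fully covered by the head RMW buffer, so the separate tail read can be skipped. The following standalone sketch, with purely illustrative names rather than QEMU code, walks through that arithmetic with concrete numbers:

/* Standalone sketch of the head/tail alignment arithmetic used in the
 * bdrv_co_pwritev hunk above; names and values are illustrative only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t align  = 4096;      /* request alignment of the backing device */
    uint64_t offset = 0x1200;    /* unaligned write offset */
    uint64_t bytes  = 0x200;     /* small write that fits in one aligned block */

    /* Widen the request so it starts on an aligned boundary (the "head"). */
    bytes  += offset & (align - 1);
    offset &= ~(align - 1);

    /* If the widened request is still smaller than one aligned block, the
     * head read already covered the tail, so pad from the head buffer
     * instead of issuing a second RMW read. */
    if (bytes < align) {
        printf("pad 0x%llx tail bytes from the head buffer, skip tail read\n",
               (unsigned long long)(align - bytes));
        bytes = align;
    }
    assert(offset % align == 0 && bytes % align == 0);
    return 0;
}

For a 0x200-byte write at offset 0x1200 with 4 KiB alignment it reports that 0xc00 tail bytes are taken from the already-read head buffer rather than from a second read.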

View File

@@ -3335,7 +3335,7 @@ void do_blockdev_backup(const char *device, const char *target,
BlockdevOnError on_target_error,
BlockJobTxn *txn, Error **errp)
{
BlockBackend *blk, *target_blk;
BlockBackend *blk;
BlockDriverState *bs;
BlockDriverState *target_bs;
Error *local_err = NULL;
@@ -3366,19 +3366,22 @@ void do_blockdev_backup(const char *device, const char *target,
}
bs = blk_bs(blk);
target_blk = blk_by_name(target);
if (!target_blk) {
error_setg(errp, "Device '%s' not found", target);
target_bs = bdrv_lookup_bs(target, target, errp);
if (!target_bs) {
goto out;
}
if (!blk_is_available(target_blk)) {
error_setg(errp, "Device '%s' has no medium", target);
goto out;
}
target_bs = blk_bs(target_blk);
if (bdrv_get_aio_context(target_bs) != aio_context) {
if (!bdrv_has_blk(target_bs)) {
/* The target BDS is not attached, we can safely move it to another
* AioContext. */
bdrv_set_aio_context(target_bs, aio_context);
} else {
error_setg(errp, "Target is attached to a different thread from "
"source.");
goto out;
}
}
backup_start(bs, target_bs, speed, sync, NULL, on_source_error,
on_target_error, block_job_cb, bs, txn, &local_err);
if (local_err != NULL) {

View File

@@ -37,8 +37,6 @@ struct VirtIOBlockDataPlane {
EventNotifier *guest_notifier; /* irq */
QEMUBH *bh; /* bh for guest notification */
Notifier insert_notifier, remove_notifier;
/* Note that these EventNotifiers are assigned by value. This is
* fine as long as you do not call event_notifier_cleanup on them
* (because you don't own the file descriptor or handle; you just
@@ -46,9 +44,6 @@ struct VirtIOBlockDataPlane {
*/
IOThread *iothread;
AioContext *ctx;
/* Operation blocker on BDS */
Error *blocker;
};
/* Raise an interrupt to signal guest, if necessary */
@@ -68,54 +63,6 @@ static void notify_guest_bh(void *opaque)
event_notifier_set(s->guest_notifier);
}
static void data_plane_set_up_op_blockers(VirtIOBlockDataPlane *s)
{
assert(!s->blocker);
error_setg(&s->blocker, "block device is in use by data plane");
blk_op_block_all(s->conf->conf.blk, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_RESIZE, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_DRIVE_DEL, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_BACKUP_SOURCE, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_CHANGE, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_COMMIT_SOURCE, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_COMMIT_TARGET, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_EJECT, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_MIRROR_SOURCE, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_STREAM, s->blocker);
blk_op_unblock(s->conf->conf.blk, BLOCK_OP_TYPE_REPLACE, s->blocker);
}
static void data_plane_remove_op_blockers(VirtIOBlockDataPlane *s)
{
if (s->blocker) {
blk_op_unblock_all(s->conf->conf.blk, s->blocker);
error_free(s->blocker);
s->blocker = NULL;
}
}
static void data_plane_blk_insert_notifier(Notifier *n, void *data)
{
VirtIOBlockDataPlane *s = container_of(n, VirtIOBlockDataPlane,
insert_notifier);
assert(s->conf->conf.blk == data);
data_plane_set_up_op_blockers(s);
}
static void data_plane_blk_remove_notifier(Notifier *n, void *data)
{
VirtIOBlockDataPlane *s = container_of(n, VirtIOBlockDataPlane,
remove_notifier);
assert(s->conf->conf.blk == data);
data_plane_remove_op_blockers(s);
}
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
VirtIOBlockDataPlane **dataplane,
@@ -158,13 +105,6 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
s->ctx = iothread_get_aio_context(s->iothread);
s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
s->insert_notifier.notify = data_plane_blk_insert_notifier;
s->remove_notifier.notify = data_plane_blk_remove_notifier;
blk_add_insert_bs_notifier(conf->conf.blk, &s->insert_notifier);
blk_add_remove_bs_notifier(conf->conf.blk, &s->remove_notifier);
data_plane_set_up_op_blockers(s);
*dataplane = s;
}
@@ -176,9 +116,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
}
virtio_blk_data_plane_stop(s);
data_plane_remove_op_blockers(s);
notifier_remove(&s->insert_notifier);
notifier_remove(&s->remove_notifier);
qemu_bh_delete(s->bh);
object_unref(OBJECT(s->iothread));
g_free(s);

View File

@@ -185,7 +185,7 @@ static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
VirtIOSCSIReq *req = sreq->hba_private;
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
uint32_t n = virtio_queue_get_id(req->vq) - 2;
uint32_t n = virtio_get_queue_index(req->vq) - 2;
assert(n < vs->conf.num_queues);
qemu_put_be32s(f, &n);
@@ -773,22 +773,6 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
}
}
static void virtio_scsi_blk_insert_notifier(Notifier *n, void *data)
{
VirtIOSCSIBlkChangeNotifier *cn = DO_UPCAST(VirtIOSCSIBlkChangeNotifier,
n, n);
assert(cn->sd->conf.blk == data);
blk_op_block_all(cn->sd->conf.blk, cn->s->blocker);
}
static void virtio_scsi_blk_remove_notifier(Notifier *n, void *data)
{
VirtIOSCSIBlkChangeNotifier *cn = DO_UPCAST(VirtIOSCSIBlkChangeNotifier,
n, n);
assert(cn->sd->conf.blk == data);
blk_op_unblock_all(cn->sd->conf.blk, cn->s->blocker);
}
static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
@@ -797,29 +781,13 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
SCSIDevice *sd = SCSI_DEVICE(dev);
if (s->ctx && !s->dataplane_fenced) {
VirtIOSCSIBlkChangeNotifier *insert_notifier, *remove_notifier;
if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
return;
}
blk_op_block_all(sd->conf.blk, s->blocker);
aio_context_acquire(s->ctx);
blk_set_aio_context(sd->conf.blk, s->ctx);
aio_context_release(s->ctx);
insert_notifier = g_new0(VirtIOSCSIBlkChangeNotifier, 1);
insert_notifier->n.notify = virtio_scsi_blk_insert_notifier;
insert_notifier->s = s;
insert_notifier->sd = sd;
blk_add_insert_bs_notifier(sd->conf.blk, &insert_notifier->n);
QTAILQ_INSERT_TAIL(&s->insert_notifiers, insert_notifier, next);
remove_notifier = g_new0(VirtIOSCSIBlkChangeNotifier, 1);
remove_notifier->n.notify = virtio_scsi_blk_remove_notifier;
remove_notifier->s = s;
remove_notifier->sd = sd;
blk_add_remove_bs_notifier(sd->conf.blk, &remove_notifier->n);
QTAILQ_INSERT_TAIL(&s->remove_notifiers, remove_notifier, next);
}
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
@@ -835,7 +803,6 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
SCSIDevice *sd = SCSI_DEVICE(dev);
VirtIOSCSIBlkChangeNotifier *insert_notifier, *remove_notifier;
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
virtio_scsi_push_event(s, sd,
@@ -843,28 +810,6 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
VIRTIO_SCSI_EVT_RESET_REMOVED);
}
if (s->ctx) {
blk_op_unblock_all(sd->conf.blk, s->blocker);
}
QTAILQ_FOREACH(insert_notifier, &s->insert_notifiers, next) {
if (insert_notifier->sd == sd) {
notifier_remove(&insert_notifier->n);
QTAILQ_REMOVE(&s->insert_notifiers, insert_notifier, next);
g_free(insert_notifier);
break;
}
}
QTAILQ_FOREACH(remove_notifier, &s->remove_notifiers, next) {
if (remove_notifier->sd == sd) {
notifier_remove(&remove_notifier->n);
QTAILQ_REMOVE(&s->remove_notifiers, remove_notifier, next);
g_free(remove_notifier);
break;
}
}
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
}
@@ -950,11 +895,6 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
register_savevm(dev, "virtio-scsi", virtio_scsi_id++, 1,
virtio_scsi_save, virtio_scsi_load, s);
error_setg(&s->blocker, "block device is in use by data plane");
QTAILQ_INIT(&s->insert_notifiers);
QTAILQ_INIT(&s->remove_notifiers);
}
static void virtio_scsi_instance_init(Object *obj)
@@ -980,8 +920,6 @@ static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIOSCSI *s = VIRTIO_SCSI(dev);
error_free(s->blocker);
unregister_savevm(dev, "virtio-scsi", s);
virtio_scsi_common_unrealize(dev, errp);
}

View File

@@ -1062,13 +1062,6 @@ int virtio_get_num_queues(VirtIODevice *vdev)
return i;
}
int virtio_queue_get_id(VirtQueue *vq)
{
VirtIODevice *vdev = vq->vdev;
assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
return vq - &vdev->vq[0];
}
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
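
The removed virtio_queue_get_id() derived the queue number by pointer subtraction from the device's VirtQueue array; per the commit subject above it duplicates what virtio_get_queue_index() already provides, which is why virtio-scsi now calls the latter. A standalone illustration of that pointer-arithmetic index computation, with stand-in types rather than the virtio structures:

/* Illustration of computing an element's index by pointer subtraction,
 * as the dropped virtio_queue_get_id() did; the types are stand-ins. */
#include <assert.h>
#include <stdio.h>

#define QUEUE_MAX 8

struct queue  { int dummy; };
struct device { struct queue vq[QUEUE_MAX]; };

static int queue_index(const struct device *dev, const struct queue *q)
{
    assert(q >= &dev->vq[0] && q < &dev->vq[QUEUE_MAX]);
    return (int)(q - &dev->vq[0]);   /* element index, not a byte offset */
}

int main(void)
{
    struct device dev;
    printf("index of vq[3]: %d\n", queue_index(&dev, &dev.vq[3]));
    return 0;
}

The subtraction yields an element count, not a byte offset, which is exactly the queue number the removed helper returned.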

View File

@@ -229,9 +229,6 @@ int bdrv_write(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors);
int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, BdrvRequestFlags flags);
BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags);
int bdrv_pread(BlockDriverState *bs, int64_t offset,
void *buf, int count);
@@ -323,27 +320,6 @@ BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);
typedef struct BlockRequest {
/* Fields to be filled by caller */
union {
struct {
int64_t sector;
int nb_sectors;
int flags;
QEMUIOVector *qiov;
};
struct {
int req;
void *buf;
};
};
BlockCompletionFunc *cb;
void *opaque;
/* Filled by block layer */
int error;
} BlockRequest;
/* sg packet commands */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,

View File

@@ -397,7 +397,7 @@ int block_job_complete_sync(BlockJob *job, Error **errp);
* @job: The job whose I/O status should be reset.
*
* Reset I/O status on @job and on BlockDriverState objects it uses,
* other than job->bs.
* other than job->blk.
*/
void block_job_iostatus_reset(BlockJob *job);

View File

@@ -68,13 +68,6 @@ typedef struct VirtIOSCSICommon {
VirtQueue **cmd_vqs;
} VirtIOSCSICommon;
typedef struct VirtIOSCSIBlkChangeNotifier {
Notifier n;
struct VirtIOSCSI *s;
SCSIDevice *sd;
QTAILQ_ENTRY(VirtIOSCSIBlkChangeNotifier) next;
} VirtIOSCSIBlkChangeNotifier;
typedef struct VirtIOSCSI {
VirtIOSCSICommon parent_obj;
@@ -85,14 +78,10 @@ typedef struct VirtIOSCSI {
/* Fields for dataplane below */
AioContext *ctx; /* one iothread per virtio-scsi-pci for now */
QTAILQ_HEAD(, VirtIOSCSIBlkChangeNotifier) insert_notifiers;
QTAILQ_HEAD(, VirtIOSCSIBlkChangeNotifier) remove_notifiers;
bool dataplane_started;
bool dataplane_starting;
bool dataplane_stopping;
bool dataplane_fenced;
Error *blocker;
uint32_t host_features;
} VirtIOSCSI;

View File

@@ -243,7 +243,6 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
uint16_t virtio_get_queue_index(VirtQueue *vq);
int virtio_queue_get_id(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
bool with_irqfd);

View File

@@ -60,7 +60,7 @@ EOF
# Sequential RMW requests on the same physical sector
off=0x1000
for ev in "head" "after_head" "tail" "after_tail"; do
for ev in "head" "after_head"; do
cat <<EOF
break pwritev_rmw_$ev A
aio_write -P 10 $((off + 0x200)) 0x200
@@ -211,16 +211,6 @@ function verify_io()
echo read -P 11 0x2400 0x200
echo read -P 0 0x2600 0xa00
echo read -P 0 0x3000 0x200
echo read -P 10 0x3200 0x200
echo read -P 11 0x3400 0x200
echo read -P 0 0x3600 0xa00
echo read -P 0 0x4000 0x200
echo read -P 10 0x4200 0x200
echo read -P 11 0x4400 0x200
echo read -P 0 0x4600 0xa00
# Chained dependencies
echo read -P 10 0x5000 0x200
echo read -P 11 0x5200 0x200

View File

@@ -19,16 +19,6 @@ wrote XXX/XXX bytes at offset XXX
XXX bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote XXX/XXX bytes at offset XXX
XXX bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
blkdebug: Resuming request 'A'
wrote XXX/XXX bytes at offset XXX
XXX bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote XXX/XXX bytes at offset XXX
XXX bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
blkdebug: Resuming request 'A'
wrote XXX/XXX bytes at offset XXX
XXX bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote XXX/XXX bytes at offset XXX
XXX bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote XXX/XXX bytes at offset XXX
XXX bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote XXX/XXX bytes at offset XXX
@@ -114,22 +104,6 @@ read 512/512 bytes at offset 9216
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 2560/2560 bytes at offset 9728
2.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 12288
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 12800
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 13312
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 2560/2560 bytes at offset 13824
2.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 16384
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 16896
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 17408
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 2560/2560 bytes at offset 17920
2.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 20480
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 512/512 bytes at offset 20992

View File

@@ -369,7 +369,15 @@ static void perf_cost(void)
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
/* This test assumes there is a freelist and marks freed coroutine memory
* with a sentinel value. If there is no freelist this would legitimately
* crash, so skip it.
*/
if (CONFIG_COROUTINE_POOL) {
g_test_add_func("/basic/co_queue", test_co_queue);
}
g_test_add_func("/basic/lifecycle", test_lifecycle);
g_test_add_func("/basic/yield", test_yield);
g_test_add_func("/basic/nesting", test_nesting);
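
The guard added above registers the coroutine-queue test only when QEMU is built with a coroutine pool, since, as the new comment explains, the test writes a sentinel into freed coroutine memory and would legitimately crash without a freelist. As a minimal, self-contained illustration of the same conditional-registration pattern with GLib's test harness (HAVE_FREELIST here is a made-up stand-in for CONFIG_COROUTINE_POOL, and the test bodies are placeholders):

/* Self-contained GLib example of conditionally registering a test case. */
#include <glib.h>

#define HAVE_FREELIST 1   /* stand-in for CONFIG_COROUTINE_POOL */

static void test_always(void)
{
    g_assert_cmpint(2 + 2, ==, 4);
}

static void test_needs_freelist(void)
{
    /* Placeholder for a test that pokes at pooled (freed) memory. */
    g_assert_cmpint(1, ==, 1);
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/demo/always", test_always);
    if (HAVE_FREELIST) {
        g_test_add_func("/demo/needs_freelist", test_needs_freelist);
    }
    return g_test_run();
}

Built against glib-2.0 (for example via pkg-config --cflags --libs glib-2.0), only the tests that were actually registered are run, which is what makes this guard sufficient to avoid the crash.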

View File

@@ -398,6 +398,14 @@ static void test_max_is_missing_limit(void)
}
}
static void test_iops_size_is_missing_limit(void)
{
/* A total/read/write iops limit is required */
throttle_config_init(&cfg);
cfg.op_size = 4096;
g_assert(!throttle_is_valid(&cfg, NULL));
}
static void test_have_timer(void)
{
/* zero structures */
@@ -653,6 +661,8 @@ int main(int argc, char **argv)
g_test_add_func("/throttle/config/conflicting", test_conflicting_config);
g_test_add_func("/throttle/config/is_valid", test_is_valid);
g_test_add_func("/throttle/config/max", test_max_is_missing_limit);
g_test_add_func("/throttle/config/iops_size",
test_iops_size_is_missing_limit);
g_test_add_func("/throttle/config_functions", test_config_functions);
g_test_add_func("/throttle/accounting", test_accounting);
g_test_add_func("/throttle/groups", test_groups);

View File

@@ -70,7 +70,6 @@ bdrv_aio_discard(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs
bdrv_aio_flush(void *bs, void *opaque) "bs %p opaque %p"
bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
bdrv_aio_write_zeroes(void *bs, int64_t sector_num, int nb_sectors, int flags, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d flags %#x opaque %p"
bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_write_zeroes(void *bs, int64_t sector_num, int nb_sector, int flags) "bs %p sector_num %"PRId64" nb_sectors %d flags %#x"

View File

@@ -315,6 +315,14 @@ bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
return false;
}
if (cfg->op_size &&
!cfg->buckets[THROTTLE_OPS_TOTAL].avg &&
!cfg->buckets[THROTTLE_OPS_READ].avg &&
!cfg->buckets[THROTTLE_OPS_WRITE].avg) {
error_setg(errp, "iops size requires an iops value to be set");
return false;
}
for (i = 0; i < BUCKETS_COUNT; i++) {
if (cfg->buckets[i].avg < 0 ||
cfg->buckets[i].max < 0 ||
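
The new throttle_is_valid() check above rejects a configuration that sets an I/O size (op_size) without any iops-total/read/write limit, matching the test_iops_size_is_missing_limit case added earlier in this series. A standalone sketch of that rule, using an illustrative config struct rather than QEMU's ThrottleConfig:

/* Standalone sketch of the rule enforced above: iops-size only makes sense
 * together with at least one iops limit. The struct is illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct iops_cfg {
    double iops_total;
    double iops_read;
    double iops_write;
    unsigned op_size;
};

static bool iops_size_is_valid(const struct iops_cfg *cfg)
{
    if (cfg->op_size &&
        !cfg->iops_total && !cfg->iops_read && !cfg->iops_write) {
        return false;   /* iops-size set but no iops limit for it to scale */
    }
    return true;
}

int main(void)
{
    struct iops_cfg bad  = { .op_size = 4096 };
    struct iops_cfg good = { .iops_total = 1000, .op_size = 4096 };
    printf("op_size alone:        %s\n", iops_size_is_valid(&bad)  ? "valid" : "rejected");
    printf("op_size + iops-total: %s\n", iops_size_is_valid(&good) ? "valid" : "rejected");
    return 0;
}

Running it shows op_size alone being rejected while op_size combined with an iops-total limit passes, which is the behaviour the new "iops size requires an iops value to be set" error describes.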