xemu/tests/qemu-iotests/093
Stefan Hajnoczi cbaddb25b2 qemu-iotests: step clock after each test iteration
The 093 throttling test submits twice as many requests as the throttle
limit in order to ensure that we reach the limit.  The remaining
requests are left in-flight at the end of each test iteration.

Commit 452589b6b4 ("vl.c/exit: pause cpus
before closing block devices") exposed a hang in 093.  This happens
because requests are still in flight when QEMU terminates but
QEMU_CLOCK_VIRTUAL time is frozen.  bdrv_drain_all() hangs forever since
throttled requests cannot complete.

Step the clock at the end of each test iteration so in-flight requests
actually finish.  This solves the hang and is cleaner than leaving requests
in-flight.
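
In the test below, the fix shows up as the final clock_step at the end of
do_test_throttle():

    # Allow remaining requests to finish.  We submitted twice as many to
    # ensure the throttle limit is reached.
    self.vm.qtest("clock_step %d" % ns)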

Note that this could also be "fixed" by disabling throttling when drives
are closed in QEMU.  That approach has two issues:

1. We must drain requests before disabling throttling, so the hang
   cannot be easily avoided!

2. Any time QEMU disables throttling internally there is a chance that
   malicious users can abuse the code path to bypass throttling limits.

Therefore it makes more sense to fix the test case than to modify QEMU.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20170815130502.8736-1-stefanha@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2017-08-15 10:03:27 -05:00

#!/usr/bin/env python
#
# Tests for IO throttling
#
# Copyright (C) 2015 Red Hat, Inc.
# Copyright (C) 2015-2016 Igalia, S.L.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import iotests

nsec_per_sec = 1000000000

class ThrottleTestCase(iotests.QMPTestCase):
    test_img = "null-aio://"
    max_drives = 3

    def blockstats(self, device):
        result = self.vm.qmp("query-blockstats")
        for r in result['return']:
            if r['device'] == device:
                stat = r['stats']
                return stat['rd_bytes'], stat['rd_operations'], stat['wr_bytes'], stat['wr_operations']
        raise Exception("Device not found for blockstats: %s" % device)

    def setUp(self):
        self.vm = iotests.VM()
        for i in range(0, self.max_drives):
            self.vm.add_drive(self.test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def configure_throttle(self, ndrives, params):
        params['group'] = 'test'

        # Set the I/O throttling parameters to all drives
        for i in range(0, ndrives):
            params['device'] = 'drive%d' % i
            result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
            self.assert_qmp(result, 'return', {})

    def do_test_throttle(self, ndrives, seconds, params, first_drive = 0):
        def check_limit(limit, num):
            # IO throttling algorithm is discrete, allow 10% error so the test
            # is more robust
            return limit == 0 or \
                   (num < seconds * limit * 1.1 / ndrives
                    and num > seconds * limit * 0.9 / ndrives)

        # Set vm clock to a known value
        ns = seconds * nsec_per_sec
        self.vm.qtest("clock_step %d" % ns)

        # Submit enough requests so the throttling mechanism kicks
        # in. The throttled requests won't be executed until we
        # advance the virtual clock.
        rq_size = 512
        rd_nr = max(params['bps'] / rq_size / 2,
                    params['bps_rd'] / rq_size,
                    params['iops'] / 2,
                    params['iops_rd'])
        rd_nr *= seconds * 2
        rd_nr /= ndrives
        wr_nr = max(params['bps'] / rq_size / 2,
                    params['bps_wr'] / rq_size,
                    params['iops'] / 2,
                    params['iops_wr'])
        wr_nr *= seconds * 2
        wr_nr /= ndrives

        # Send I/O requests to all drives
        for i in range(rd_nr):
            for drive in range(0, ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_read %d %d" %
                                    (i * rq_size, rq_size))

        for i in range(wr_nr):
            for drive in range(0, ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_write %d %d" %
                                    (i * rq_size, rq_size))

        # We'll store the I/O stats for each drive in these arrays
        start_rd_bytes = [0] * ndrives
        start_rd_iops = [0] * ndrives
        start_wr_bytes = [0] * ndrives
        start_wr_iops = [0] * ndrives
        end_rd_bytes = [0] * ndrives
        end_rd_iops = [0] * ndrives
        end_wr_bytes = [0] * ndrives
        end_wr_iops = [0] * ndrives

        # Read the stats before advancing the clock
        for i in range(0, ndrives):
            idx = first_drive + i
            start_rd_bytes[i], start_rd_iops[i], start_wr_bytes[i], \
                start_wr_iops[i] = self.blockstats('drive%d' % idx)

        self.vm.qtest("clock_step %d" % ns)

        # Read the stats after advancing the clock
        for i in range(0, ndrives):
            idx = first_drive + i
            end_rd_bytes[i], end_rd_iops[i], end_wr_bytes[i], \
                end_wr_iops[i] = self.blockstats('drive%d' % idx)

        # Check that the I/O is within the limits and evenly distributed
        for i in range(0, ndrives):
            rd_bytes = end_rd_bytes[i] - start_rd_bytes[i]
            rd_iops = end_rd_iops[i] - start_rd_iops[i]
            wr_bytes = end_wr_bytes[i] - start_wr_bytes[i]
            wr_iops = end_wr_iops[i] - start_wr_iops[i]

            self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
            self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
            self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
            self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
            self.assertTrue(check_limit(params['iops_rd'], rd_iops))
            self.assertTrue(check_limit(params['iops_wr'], wr_iops))

        # Allow remaining requests to finish.  We submitted twice as many to
        # ensure the throttle limit is reached.
        self.vm.qtest("clock_step %d" % ns)

    # Connect N drives to a VM and test I/O in all of them
    def test_all(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        # Repeat the test with different numbers of drives
        for ndrives in range(1, self.max_drives + 1):
            # Pick each out of all possible params and test
            for tk in params:
                limits = dict([(k, 0) for k in params])
                limits[tk] = params[tk] * ndrives
                self.configure_throttle(ndrives, limits)
                self.do_test_throttle(ndrives, 5, limits)

    # Connect N drives to a VM and test I/O in just one of them at a time
    def test_one(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        # Repeat the test for each one of the drives
        for drive in range(0, self.max_drives):
            # Pick each out of all possible params and test
            for tk in params:
                limits = dict([(k, 0) for k in params])
                limits[tk] = params[tk] * self.max_drives
                self.configure_throttle(self.max_drives, limits)
                self.do_test_throttle(1, 5, limits, drive)

    def test_burst(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        ndrives = 1
        # Pick each out of all possible params and test
        for tk in params:
            rate = params[tk] * ndrives
            burst_rate = rate * 7
            burst_length = 4

            # Configure the throttling settings
            settings = dict([(k, 0) for k in params])
            settings[tk] = rate
            settings['%s_max' % tk] = burst_rate
            settings['%s_max_length' % tk] = burst_length
            self.configure_throttle(ndrives, settings)

            # Wait for the bucket to empty so we can do bursts
            wait_ns = nsec_per_sec * burst_length * burst_rate / rate
            self.vm.qtest("clock_step %d" % wait_ns)

            # Test I/O at the max burst rate
            limits = dict([(k, 0) for k in params])
            limits[tk] = burst_rate
            self.do_test_throttle(ndrives, burst_length, limits)

            # Now test I/O at the normal rate
            limits[tk] = rate
            self.do_test_throttle(ndrives, 5, limits)

class ThrottleTestCoroutine(ThrottleTestCase):
    test_img = "null-co://"

class ThrottleTestGroupNames(iotests.QMPTestCase):
    test_img = "null-aio://"
    max_drives = 3

    def setUp(self):
        self.vm = iotests.VM()
        for i in range(0, self.max_drives):
            self.vm.add_drive(self.test_img, "throttling.iops-total=100")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def set_io_throttle(self, device, params):
        params["device"] = device
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
        self.assert_qmp(result, 'return', {})

    def verify_name(self, device, name):
        result = self.vm.qmp("query-block")
        for r in result["return"]:
            if r["device"] == device:
                info = r["inserted"]
                if name:
                    self.assertEqual(info["group"], name)
                else:
                    self.assertFalse(info.has_key('group'))
                return
        raise Exception("No group information found for '%s'" % device)

    def test_group_naming(self):
        params = {"bps": 0,
                  "bps_rd": 0,
                  "bps_wr": 0,
                  "iops": 0,
                  "iops_rd": 0,
                  "iops_wr": 0}

        # Check the drives added using the command line.
        # The default throttling group name is the device name.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.verify_name(devname, devname)

        # Clear throttling settings => the group name is gone.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, None)

        # Set throttling settings using block_set_io_throttle and
        # check the default group names.
        params["iops"] = 10
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, devname)

        # Set a custom group name for each device
        for i in range(3):
            devname = "drive%d" % i
            groupname = "group%d" % i
            params['group'] = groupname
            self.set_io_throttle(devname, params)
            self.verify_name(devname, groupname)

        # Put drive0 in group1 and check that all other devices remain
        # unchanged
        params['group'] = 'group1'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group1')
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

        # Put drive0 in group2 and check that all other devices remain
        # unchanged
        params['group'] = 'group2'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group2')
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

        # Clear throttling settings from drive0 and check that all other
        # devices remain unchanged
        params["iops"] = 0
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', None)
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

if __name__ == '__main__':
    iotests.main(supported_fmts=["raw"])