commit 55f3e5cb3b

After introducing parallel async copy requests instead of a plain
cluster-by-cluster copying loop, we will have to wait for the paused
status, since we need to wait for several parallel requests to finish.
So, gently wait instead of just asserting that the job is already
paused.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20210116214705.822267-12-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
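
The wait loop this commit adds to dismissal_failure() below polls
query-block-jobs until the job actually reports 'paused', instead of
asserting that state right away (hunk reproduced from the test):

    with iotests.Timeout(60, "Timeout waiting for backup actually paused"):
        while True:
            res = self.vm.qmp('query-block-jobs')
            if res['return'][0]['status'] == 'paused':
                break
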
#!/usr/bin/env python3
# group: rw backing
#
# Tests for drive-backup
#
# Copyright (C) 2013 Red Hat, Inc.
#
# Based on 041.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import time
import os
import iotests
from iotests import qemu_img, qemu_io, create_image

backing_img = os.path.join(iotests.test_dir, 'backing.img')
test_img = os.path.join(iotests.test_dir, 'test.img')
target_img = os.path.join(iotests.test_dir, 'target.img')

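# img_create() builds a formatted image in test_dir and returns its full
# path; any extra keyword arguments are passed to 'qemu-img create' as
# -o option=value pairs.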
def img_create(img, fmt=iotests.imgfmt, size='64M', **kwargs):
    fullname = os.path.join(iotests.test_dir, '%s.%s' % (img, fmt))
    optargs = []
    for k, v in kwargs.items():
        optargs = optargs + ['-o', '%s=%s' % (k, v)]
    args = ['create', '-f', fmt] + optargs + [fullname, size]
    iotests.qemu_img(*args)
    return fullname

def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass

def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


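# Basic drive-backup coverage: sync=top must produce a target that matches
# the active layer on top of its backing file, and a cancelled sync=none job
# must still have copied the original contents of any cluster the guest
# overwrote while the job was running.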
class TestSyncModesNoneAndTop(iotests.QMPTestCase):
    image_len = 64 * 1024 * 1024 # 64 MB

    def setUp(self):
        create_image(backing_img, TestSyncModesNoneAndTop.image_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img, '-F', 'raw', test_img)
        qemu_io('-c', 'write -P0x41 0 512', test_img)
        qemu_io('-c', 'write -P0xd5 1M 32k', test_img)
        qemu_io('-c', 'write -P0xdc 32M 124k', test_img)
        qemu_io('-c', 'write -P0xdc 67043328 64k', test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        try:
            os.remove(target_img)
        except OSError:
            pass

    def test_complete_top(self):
        self.assert_no_active_block_jobs()
        result = self.vm.qmp('drive-backup', device='drive0', sync='top',
                             format=iotests.imgfmt, target=target_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(check_offset=False)

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
        self.assertTrue(iotests.compare_images(test_img, target_img),
                        'target image does not match source after backup')

    def test_cancel_sync_none(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('drive-backup', device='drive0',
                             sync='none', target=target_img)
        self.assert_qmp(result, 'return', {})
        time.sleep(1)
        self.vm.hmp_qemu_io('drive0', 'write -P0x5e 0 512')
        self.vm.hmp_qemu_io('drive0', 'aio_flush')

        event = self.cancel_and_wait()
        self.assert_qmp(event, 'data/type', 'backup')

        self.vm.shutdown()
        time.sleep(1)
        # Verify that the original contents exist in the target image.
        self.assertEqual(-1, qemu_io('-c', 'read -P0x41 0 512',
                                     target_img).find("verification failed"))

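# A drive-backup job throttled to 1 byte/s over a blkdebug null device: a
# guest write to a range the job has not copied yet must trigger the
# backup job's before-write handling so the old data reaches the target
# before the guest write lands.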
class TestBeforeWriteNotifier(iotests.QMPTestCase):
    def setUp(self):
        self.vm = iotests.VM().add_drive_raw("file=blkdebug::null-co://,id=drive0,align=65536,driver=blkdebug")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(target_img)

    def test_before_write_notifier(self):
        self.vm.pause_drive("drive0")
        result = self.vm.qmp('drive-backup', device='drive0',
                             sync='full', target=target_img,
                             format="file", speed=1)
        self.assert_qmp(result, 'return', {})
        result = self.vm.qmp('block-job-pause', device="drive0")
        self.assert_qmp(result, 'return', {})
        # Speed is low enough that this must be an uncopied range, which will
        # trigger the before write notifier
        self.vm.hmp_qemu_io('drive0', 'aio_write -P 1 512512 512')
        self.vm.resume_drive("drive0")
        result = self.vm.qmp('block-job-resume', device="drive0")
        self.assert_qmp(result, 'return', {})
        event = self.cancel_and_wait()
        self.assert_qmp(event, 'data/type', 'backup')

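# Job-lifecycle coverage for drive-backup/blockdev-backup: overlapping guest
# writes during a throttled job, auto_dismiss semantics, job ID collisions,
# and error handling with blkdebug-injected write errors on the target.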
class BackupTest(iotests.QMPTestCase):
    def setUp(self):
        self.vm = iotests.VM()
        self.test_img = img_create('test')
        self.dest_img = img_create('dest')
        self.dest_img2 = img_create('dest2')
        self.ref_img = img_create('ref')
        self.vm.add_drive(self.test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        try_remove(self.test_img)
        try_remove(self.dest_img)
        try_remove(self.dest_img2)
        try_remove(self.ref_img)

    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')

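    # Helpers: qmp_backup() issues the backup command and checks for an
    # expected synchronous error (serror); qmp_backup_wait() waits for the
    # BLOCK_JOB_COMPLETED event and checks for an expected asynchronous
    # error (aerror); qmp_backup_and_wait() chains the two.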
    def qmp_backup_and_wait(self, cmd='drive-backup', serror=None,
                            aerror=None, **kwargs):
        if not self.qmp_backup(cmd, serror, **kwargs):
            return False
        return self.qmp_backup_wait(kwargs['device'], aerror)

    def qmp_backup(self, cmd='drive-backup',
                   error=None, **kwargs):
        self.assertTrue('device' in kwargs)
        res = self.vm.qmp(cmd, **kwargs)
        if error:
            self.assert_qmp(res, 'error/desc', error)
            return False
        self.assert_qmp(res, 'return', {})
        return True

    def qmp_backup_wait(self, device, error=None):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Failure.
            self.assert_qmp(event, 'data/error', error)
            return False

    def test_overlapping_writes(self):
        # Write something to back up
        self.hmp_io_writes('drive0', [('42', '0M', '2M')])

        # Create a reference backup
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.ref_img,
                                 auto_dismiss=False)
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})

        # Now to the test backup: We simulate the following guest
        # writes:
        # (1) [1M + 64k, 1M + 128k): Afterwards, everything in that
        #     area should be in the target image, and we must not copy
        #     it again (because the source image has changed now)
        #     (64k is the job's cluster size)
        # (2) [1M, 2M): The backup job must not get overeager. It
        #     must copy [1M, 1M + 64k) and [1M + 128k, 2M) separately,
        #     but not the area in between.

        self.qmp_backup(device='drive0', format=iotests.imgfmt, sync='full',
                        target=self.dest_img, speed=1, auto_dismiss=False)

        self.hmp_io_writes('drive0', [('23', '%ik' % (1024 + 64), '64k'),
                                      ('66', '1M', '1M')])

        # Let the job complete
        res = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
        self.assert_qmp(res, 'return', {})
        self.qmp_backup_wait('drive0')
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})

        self.assertTrue(iotests.compare_images(self.ref_img, self.dest_img),
                        'target image does not match reference image')

    def test_dismiss_false(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img,
                                 auto_dismiss=True)
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])

    def test_dismiss_true(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img,
                                 auto_dismiss=False)
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return[0]/status', 'concluded')
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])

    def test_dismiss_bad_id(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        res = self.vm.qmp('block-job-dismiss', id='foobar')
        self.assert_qmp(res, 'error/class', 'DeviceNotActive')

    def test_dismiss_collision(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img,
                                 auto_dismiss=False)
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return[0]/status', 'concluded')
        # Leave zombie job un-dismissed, observe a failure:
        res = self.qmp_backup_and_wait(serror="Job ID 'drive0' already in use",
                                       device='drive0', format=iotests.imgfmt,
                                       sync='full', target=self.dest_img2,
                                       auto_dismiss=False)
        self.assertEqual(res, False)
        # OK, dismiss the zombie.
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        # Ensure it's really gone.
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img2,
                                 auto_dismiss=False)

    def dismissal_failure(self, dismissal_opt):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        # Give blkdebug something to chew on
        self.hmp_io_writes('drive0',
                           (('0x9a', 0, 512),
                            ('0x55', '8M', '352k'),
                            ('0x78', '15872k', '1M')))
        # Add destination node via blkdebug
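        # The inject-error rule below makes blkdebug fail exactly one
        # write_aio request to the target with EIO (errno 5); with
        # on-target-error=stop the job then reports BLOCK_JOB_ERROR and
        # pauses instead of failing outright.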
        res = self.vm.qmp('blockdev-add',
                          node_name='target0',
                          driver=iotests.imgfmt,
                          file={
                              'driver': 'blkdebug',
                              'image': {
                                  'driver': 'file',
                                  'filename': self.dest_img
                              },
                              'inject-error': [{
                                  'event': 'write_aio',
                                  'errno': 5,
                                  'immediately': False,
                                  'once': True
                              }],
                          })
        self.assert_qmp(res, 'return', {})

        res = self.qmp_backup(cmd='blockdev-backup',
                              device='drive0', target='target0',
                              on_target_error='stop',
                              sync='full',
                              auto_dismiss=dismissal_opt)
        self.assertTrue(res)
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={'data': {'device': 'drive0'}})
        self.assertNotEqual(event, None)
        # OK, job should pause, but it can't do it immediately, as it can't
        # cancel other parallel requests (which didn't fail)
        with iotests.Timeout(60, "Timeout waiting for backup actually paused"):
            while True:
                res = self.vm.qmp('query-block-jobs')
                if res['return'][0]['status'] == 'paused':
                    break
        self.assert_qmp(res, 'return[0]/status', 'paused')
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'error/desc',
                        "Job 'drive0' in state 'paused' cannot accept"
                        " command verb 'dismiss'")
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return[0]/status', 'paused')
        # OK, unstick job and move forward.
        res = self.vm.qmp('block-job-resume', device='drive0')
        self.assert_qmp(res, 'return', {})
        # And now we need to wait for it to conclude.
        res = self.qmp_backup_wait(device='drive0')
        self.assertTrue(res)
        if not dismissal_opt:
            # Job should now be languishing:
            res = self.vm.qmp('query-block-jobs')
            self.assert_qmp(res, 'return[0]/status', 'concluded')
            res = self.vm.qmp('block-job-dismiss', id='drive0')
            self.assert_qmp(res, 'return', {})
            res = self.vm.qmp('query-block-jobs')
            self.assert_qmp(res, 'return', [])

    def test_dismiss_premature(self):
        self.dismissal_failure(False)

    def test_dismiss_erroneous(self):
        self.dismissal_failure(True)

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'qed'],
                 supported_protocols=['file'])