/*
 * Block driver for Connectix / Microsoft Virtual PC images
 *
 * Copyright (c) 2005 Alex Beregszaszi
 * Copyright (c) 2009 Kevin Wolf <kwolf@suse.de>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qemu/module.h"
#include "migration/migration.h"
#if defined(CONFIG_UUID)
#include <uuid/uuid.h>
#endif

/**************************************************************/

#define HEADER_SIZE 512

//#define CACHE

enum vhd_type {
    VHD_FIXED           = 2,
    VHD_DYNAMIC         = 3,
    VHD_DIFFERENCING    = 4,
};

// Seconds since Jan 1, 2000 0:00:00 (UTC)
#define VHD_TIMESTAMP_BASE 946684800
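/* (946684800 is 2000-01-01T00:00:00Z expressed as a Unix timestamp; the VHD
 * footer and parent timestamps are stored as offsets from that instant.) */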

#define VHD_CHS_MAX_C   65535LL
#define VHD_CHS_MAX_H   16
#define VHD_CHS_MAX_S   255

#define VHD_MAX_SECTORS (65535LL * 255 * 255)
#define VHD_MAX_GEOMETRY (VHD_CHS_MAX_C * VHD_CHS_MAX_H * VHD_CHS_MAX_S)
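
/*
 * Image creation option. When "force_size" is passed at creation time, e.g.
 *     qemu-img convert -f raw -o force_size -O vpc test.img test.vhd
 * current_size is written as the exact requested virtual disk size (as
 * Hyper-V does) instead of the nearest CHS-derived size, and creator_app is
 * set to "qem2" rather than "qemu" so that vpc_open can recognise such
 * images later.
 */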
#define VPC_OPT_FORCE_SIZE "force_size"

// always big-endian
typedef struct vhd_footer {
    char        creator[8]; // "conectix"
    uint32_t    features;
    uint32_t    version;

    // Offset of next header structure, 0xFFFFFFFF if none
    uint64_t    data_offset;

    // Seconds since Jan 1, 2000 0:00:00 (UTC)
    uint32_t    timestamp;

    char        creator_app[4]; // "vpc "
    uint16_t    major;
    uint16_t    minor;
    char        creator_os[4]; // "Wi2k"

    uint64_t    orig_size;
    uint64_t    current_size;

    uint16_t    cyls;
    uint8_t     heads;
    uint8_t     secs_per_cyl;

    uint32_t    type;

    // Checksum of the Hard Disk Footer ("one's complement of the sum of all
    // the bytes in the footer without the checksum field")
    uint32_t    checksum;

    // UUID used to identify a parent hard disk (backing file)
    uint8_t     uuid[16];

    uint8_t     in_saved_state;
} QEMU_PACKED VHDFooter;
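
/*
 * The on-disk footer occupies a full 512-byte sector (HEADER_SIZE); the
 * struct above only describes its leading fields, the remainder of that
 * sector is reserved padding.
 */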

typedef struct vhd_dyndisk_header {
    char        magic[8]; // "cxsparse"

    // Offset of next header structure, 0xFFFFFFFF if none
    uint64_t    data_offset;

    // Offset of the Block Allocation Table (BAT)
    uint64_t    table_offset;

    uint32_t    version;
    uint32_t    max_table_entries; // 32bit/entry

    // 2 MB by default, must be a power of two
    uint32_t    block_size;

    uint32_t    checksum;
    uint8_t     parent_uuid[16];
    uint32_t    parent_timestamp;
    uint32_t    reserved;

    // Backing file name (in UTF-16)
    uint8_t     parent_name[512];

    struct {
        uint32_t    platform;
        uint32_t    data_space;
        uint32_t    data_length;
        uint32_t    reserved;
        uint64_t    data_offset;
    } parent_locator[8];
} QEMU_PACKED VHDDynDiskHeader;

typedef struct BDRVVPCState {
    CoMutex lock;
    uint8_t footer_buf[HEADER_SIZE];
    uint64_t free_data_block_offset;
    int max_table_entries;
    uint32_t *pagetable;
    uint64_t bat_offset;
    uint64_t last_bitmap_offset;

    uint32_t block_size;
    uint32_t bitmap_size;
    bool force_use_chs;
    bool force_use_sz;

#ifdef CACHE
    uint8_t *pageentry_u8;
    uint32_t *pageentry_u32;
    uint16_t *pageentry_u16;

    uint64_t last_bitmap;
#endif

    Error *migration_blocker;
} BDRVVPCState;

#define VPC_OPT_SIZE_CALC "force_size_calc"

static QemuOptsList vpc_runtime_opts = {
    .name = "vpc-runtime-opts",
    .head = QTAILQ_HEAD_INITIALIZER(vpc_runtime_opts.head),
    .desc = {
        {
            .name = VPC_OPT_SIZE_CALC,
            .type = QEMU_OPT_STRING,
            .help = "Force disk size calculation to use either CHS geometry, "
                    "or use the disk current_size specified in the VHD footer. "
                    "{chs, current_size}"
        },
        { /* end of list */ }
    }
};
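
/*
 * The VHD footer/header checksum is the one's complement of the plain byte
 * sum of the structure, computed with the checksum field itself set to zero
 * (which is why vpc_open clears footer->checksum before re-running
 * vpc_checksum over the buffer).
 */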

static uint32_t vpc_checksum(uint8_t* buf, size_t size)
{
    uint32_t res = 0;
    int i;

    for (i = 0; i < size; i++)
        res += buf[i];

    return ~res;
}

static int vpc_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    if (buf_size >= 8 && !strncmp((char *)buf, "conectix", 8))
        return 100;
    return 0;
}

static void vpc_parse_options(BlockDriverState *bs, QemuOpts *opts,
                              Error **errp)
{
    BDRVVPCState *s = bs->opaque;
    const char *size_calc;

    size_calc = qemu_opt_get(opts, VPC_OPT_SIZE_CALC);

    if (!size_calc) {
        /* no override, use autodetect only */
    } else if (!strcmp(size_calc, "current_size")) {
        s->force_use_sz = true;
    } else if (!strcmp(size_calc, "chs")) {
        s->force_use_chs = true;
    } else {
        error_setg(errp, "Invalid size calculation mode: '%s'", size_calc);
    }
}
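
/*
 * force_size_calc is a runtime (open-time) option; assuming the usual way
 * driver-specific options are plumbed through -drive, an invocation along
 * the lines of
 *     -drive file=test.vhd,format=vpc,force_size_calc=current_size
 * should select the override explicitly (hypothetical command line; the
 * exact spelling depends on how the options QDict is populated).
 */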

static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVVPCState *s = bs->opaque;
    int i;
    VHDFooter *footer;
    VHDDynDiskHeader *dyndisk_header;
    QemuOpts *opts = NULL;
    Error *local_err = NULL;
    bool use_chs;
    uint8_t buf[HEADER_SIZE];
    uint32_t checksum;
    uint64_t computed_size;
    uint64_t pagetable_size;
    int disk_type = VHD_DYNAMIC;
    int ret;

    opts = qemu_opts_create(&vpc_runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    vpc_parse_options(bs, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    ret = bdrv_pread(bs->file->bs, 0, s->footer_buf, HEADER_SIZE);
    if (ret < 0) {
        goto fail;
    }

    footer = (VHDFooter *) s->footer_buf;
    if (strncmp(footer->creator, "conectix", 8)) {
        int64_t offset = bdrv_getlength(bs->file->bs);
        if (offset < 0) {
            ret = offset;
            goto fail;
        } else if (offset < HEADER_SIZE) {
            ret = -EINVAL;
            goto fail;
        }

        /* If a fixed disk, the footer is found only at the end of the file */
        ret = bdrv_pread(bs->file->bs, offset-HEADER_SIZE, s->footer_buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }
        if (strncmp(footer->creator, "conectix", 8)) {
            error_setg(errp, "invalid VPC image");
            ret = -EINVAL;
            goto fail;
        }
        disk_type = VHD_FIXED;
    }

    checksum = be32_to_cpu(footer->checksum);
    footer->checksum = 0;
    if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum)
        fprintf(stderr, "block-vpc: The header checksum of '%s' is "
            "incorrect.\n", bs->filename);

    /* Write 'checksum' back to the footer, or else it would be left as zero. */
    footer->checksum = cpu_to_be32(checksum);

    // The visible size of an image in Virtual PC depends on the geometry
    // rather than on the size stored in the footer (the size in the footer
    // is usually too large)
    bs->total_sectors = (int64_t)
        be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;

    /* Microsoft Virtual PC and Microsoft Hyper-V produce and read
     * VHD image sizes differently.  VPC will rely on CHS geometry,
     * while Hyper-V and disk2vhd use the size specified in the footer.
     *
     * We use a couple of approaches to try and determine the correct method:
     * look at the Creator App field, and look for images that have CHS
     * geometry that is the maximum value.
     *
     * If the CHS geometry is the maximum CHS geometry, then we assume that
     * the size is the footer->current_size to avoid truncation.  Otherwise,
     * we follow the table based on footer->creator_app:
     *
     *  Known creator apps:
     *      'vpc '  :  CHS              Virtual PC (uses disk geometry)
     *      'qemu'  :  CHS              QEMU (uses disk geometry)
     *      'qem2'  :  current_size     QEMU (uses current_size)
     *      'win '  :  current_size     Hyper-V
     *      'd2v '  :  current_size     Disk2vhd
     *      'tap\0' :  current_size     XenServer
     *
     *  The user can override the table values via drive options, however
     *  even with an override we will still use current_size for images
     *  that have CHS geometry of the maximum size.
     */
    use_chs = (!!strncmp(footer->creator_app, "win ", 4) &&
               !!strncmp(footer->creator_app, "qem2", 4) &&
               !!strncmp(footer->creator_app, "d2v ", 4) &&
               !!memcmp(footer->creator_app, "tap", 4)) || s->force_use_chs;

    if (!use_chs || bs->total_sectors == VHD_MAX_GEOMETRY || s->force_use_sz) {
        bs->total_sectors = be64_to_cpu(footer->current_size) /
                            BDRV_SECTOR_SIZE;
    }

    /* Allow a maximum disk size of approximately 2 TB */
    if (bs->total_sectors >= VHD_MAX_SECTORS) {
        ret = -EFBIG;
        goto fail;
    }

    if (disk_type == VHD_DYNAMIC) {
        ret = bdrv_pread(bs->file->bs, be64_to_cpu(footer->data_offset), buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }

        dyndisk_header = (VHDDynDiskHeader *) buf;

        if (strncmp(dyndisk_header->magic, "cxsparse", 8)) {
            ret = -EINVAL;
            goto fail;
        }

        s->block_size = be32_to_cpu(dyndisk_header->block_size);
        if (!is_power_of_2(s->block_size) || s->block_size < BDRV_SECTOR_SIZE) {
            error_setg(errp, "Invalid block size %" PRIu32, s->block_size);
            ret = -EINVAL;
            goto fail;
        }
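
        /*
         * One bitmap bit per 512-byte sector of a block, i.e.
         * block_size / (8 * 512) bytes, rounded up to a whole number of
         * 512-byte sectors (so the default 2 MiB block gets a 512-byte
         * bitmap).
         */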
        s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;

        s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);

        if ((bs->total_sectors * 512) / s->block_size > 0xffffffffU) {
            ret = -EINVAL;
            goto fail;
        }
        if (s->max_table_entries > (VHD_MAX_SECTORS * 512) / s->block_size) {
            ret = -EINVAL;
            goto fail;
        }

        computed_size = (uint64_t) s->max_table_entries * s->block_size;
        if (computed_size < bs->total_sectors * 512) {
            ret = -EINVAL;
            goto fail;
        }

        if (s->max_table_entries > SIZE_MAX / 4 ||
            s->max_table_entries > (int) INT_MAX / 4) {
            error_setg(errp, "Max Table Entries too large (%" PRId32 ")",
                       s->max_table_entries);
            ret = -EINVAL;
            goto fail;
        }

        pagetable_size = (uint64_t) s->max_table_entries * 4;

        s->pagetable = qemu_try_blockalign(bs->file->bs, pagetable_size);
        if (s->pagetable == NULL) {
            ret = -ENOMEM;
            goto fail;
        }

        s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);

        ret = bdrv_pread(bs->file->bs, s->bat_offset, s->pagetable,
                         pagetable_size);
        if (ret < 0) {
            goto fail;
        }

        s->free_data_block_offset =
            ROUND_UP(s->bat_offset + pagetable_size, 512);

        for (i = 0; i < s->max_table_entries; i++) {
            be32_to_cpus(&s->pagetable[i]);
            if (s->pagetable[i] != 0xFFFFFFFF) {
                int64_t next = (512 * (int64_t) s->pagetable[i]) +
                    s->bitmap_size + s->block_size;

                if (next > s->free_data_block_offset) {
                    s->free_data_block_offset = next;
                }
            }
        }

        if (s->free_data_block_offset > bdrv_getlength(bs->file->bs)) {
            error_setg(errp, "block-vpc: free_data_block_offset points after "
                             "the end of file. The image has been truncated.");
            ret = -EINVAL;
            goto fail;
        }

        s->last_bitmap_offset = (int64_t) -1;

#ifdef CACHE
        s->pageentry_u8 = g_malloc(512);
        s->pageentry_u32 = s->pageentry_u8;
        s->pageentry_u16 = s->pageentry_u8;
        s->last_pagetable = -1;
#endif
    }

    qemu_co_mutex_init(&s->lock);

    /* Disable migration when VHD images are used */
    error_setg(&s->migration_blocker, "The vpc format used by node '%s' "
               "does not support live migration",
               bdrv_get_device_or_node_name(bs));
    migrate_add_blocker(s->migration_blocker);

    return 0;

fail:
    qemu_vfree(s->pagetable);
#ifdef CACHE
    g_free(s->pageentry_u8);
#endif
    return ret;
}

static int vpc_reopen_prepare(BDRVReopenState *state,
                              BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

/*
 * Returns the absolute byte offset of the given sector in the image file.
 * If the sector is not allocated, -1 is returned instead.
 *
 * The parameter write must be 1 if the offset will be used for a write
 * operation (the block bitmap is updated then), 0 otherwise.
 */
static inline int64_t get_sector_offset(BlockDriverState *bs,
                                        int64_t sector_num, int write)
{
    BDRVVPCState *s = bs->opaque;
    uint64_t offset = sector_num * 512;
    uint64_t bitmap_offset, block_offset;
    uint32_t pagetable_index, pageentry_index;

    pagetable_index = offset / s->block_size;
    pageentry_index = (offset % s->block_size) / 512;

    if (pagetable_index >= s->max_table_entries || s->pagetable[pagetable_index] == 0xffffffff)
        return -1; // not allocated

    bitmap_offset = 512 * (uint64_t) s->pagetable[pagetable_index];
    block_offset = bitmap_offset + s->bitmap_size + (512 * pageentry_index);

    // We must ensure that we don't write to any sectors which are marked as
    // unused in the bitmap. We get away with setting all bits in the block
    // bitmap each time we write to a new block. This might cause Virtual PC to
    // miss sparse read optimization, but it's not a problem in terms of
    // correctness.
    if (write && (s->last_bitmap_offset != bitmap_offset)) {
        uint8_t bitmap[s->bitmap_size];

        s->last_bitmap_offset = bitmap_offset;
        memset(bitmap, 0xff, s->bitmap_size);
        bdrv_pwrite_sync(bs->file->bs, bitmap_offset, bitmap, s->bitmap_size);
    }

    return block_offset;
}
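
/*
 * Worked example for the lookup above, assuming the default 2 MiB block
 * size: sector 5000 sits at byte offset 2,560,000, so pagetable_index is
 * 2,560,000 / 2,097,152 = 1 and pageentry_index is
 * (2,560,000 % 2,097,152) / 512 = 904; the data lives 904 sectors past the
 * bitmap of the block that BAT entry 1 points to.
 */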

/*
 * Writes the footer to the end of the image file. This is needed when the
 * file grows as it overwrites the old footer
 *
 * Returns 0 on success and < 0 on error
 */
static int rewrite_footer(BlockDriverState* bs)
{
    int ret;
    BDRVVPCState *s = bs->opaque;
    int64_t offset = s->free_data_block_offset;

    ret = bdrv_pwrite_sync(bs->file->bs, offset, s->footer_buf, HEADER_SIZE);
    if (ret < 0)
        return ret;

    return 0;
}

/*
 * Allocates a new block. This involves writing a new footer and updating
 * the Block Allocation Table to use the space at the old end of the image
 * file (overwriting the old footer)
 *
 * Returns the sectors' offset in the image file on success and < 0 on error
 */
static int64_t alloc_block(BlockDriverState* bs, int64_t sector_num)
{
    BDRVVPCState *s = bs->opaque;
    int64_t bat_offset;
    uint32_t index, bat_value;
    int ret;
    uint8_t bitmap[s->bitmap_size];

    // Check if sector_num is valid
    if ((sector_num < 0) || (sector_num > bs->total_sectors))
        return -1;

    // Write entry into in-memory BAT
    index = (sector_num * 512) / s->block_size;
    if (s->pagetable[index] != 0xFFFFFFFF)
        return -1;

    s->pagetable[index] = s->free_data_block_offset / 512;

    // Initialize the block's bitmap
    memset(bitmap, 0xff, s->bitmap_size);
    ret = bdrv_pwrite_sync(bs->file->bs, s->free_data_block_offset, bitmap,
        s->bitmap_size);
    if (ret < 0) {
        return ret;
    }

    // Write new footer (the old one will be overwritten)
    s->free_data_block_offset += s->block_size + s->bitmap_size;
    ret = rewrite_footer(bs);
    if (ret < 0)
        goto fail;

    // Write BAT entry to disk
    bat_offset = s->bat_offset + (4 * index);
    bat_value = cpu_to_be32(s->pagetable[index]);
    ret = bdrv_pwrite_sync(bs->file->bs, bat_offset, &bat_value, 4);
    if (ret < 0)
        goto fail;

    return get_sector_offset(bs, sector_num, 0);

fail:
    s->free_data_block_offset -= (s->block_size + s->bitmap_size);
    return -1;
}
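
/*
 * Allocation protocol used above: the new block (bitmap + data) is appended
 * at free_data_block_offset, the footer is rewritten just past it, and only
 * then is the BAT entry persisted; if a later step fails,
 * free_data_block_offset is rolled back so the space can be reused.
 */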

static int vpc_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVVPCState *s = (BDRVVPCState *)bs->opaque;
    VHDFooter *footer = (VHDFooter *) s->footer_buf;

    if (be32_to_cpu(footer->type) != VHD_FIXED) {
        bdi->cluster_size = s->block_size;
    }

    bdi->unallocated_blocks_are_zero = true;
    return 0;
}

static int vpc_read(BlockDriverState *bs, int64_t sector_num,
                    uint8_t *buf, int nb_sectors)
{
    BDRVVPCState *s = bs->opaque;
    int ret;
    int64_t offset;
    int64_t sectors, sectors_per_block;
    VHDFooter *footer = (VHDFooter *) s->footer_buf;

    if (be32_to_cpu(footer->type) == VHD_FIXED) {
        return bdrv_read(bs->file->bs, sector_num, buf, nb_sectors);
    }
    while (nb_sectors > 0) {
        offset = get_sector_offset(bs, sector_num, 0);

        sectors_per_block = s->block_size >> BDRV_SECTOR_BITS;
        sectors = sectors_per_block - (sector_num % sectors_per_block);
        if (sectors > nb_sectors) {
            sectors = nb_sectors;
        }

        if (offset == -1) {
            memset(buf, 0, sectors * BDRV_SECTOR_SIZE);
        } else {
            ret = bdrv_pread(bs->file->bs, offset, buf,
                sectors * BDRV_SECTOR_SIZE);
            if (ret != sectors * BDRV_SECTOR_SIZE) {
                return -1;
            }
        }

        nb_sectors -= sectors;
        sector_num += sectors;
        buf += sectors * BDRV_SECTOR_SIZE;
    }
    return 0;
}
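
/*
 * Note that reads of unallocated blocks above are satisfied with zeroes
 * (the memset branch), which is what lets vpc_get_info report
 * unallocated_blocks_are_zero = true.
 */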

static coroutine_fn int vpc_co_read(BlockDriverState *bs, int64_t sector_num,
                                    uint8_t *buf, int nb_sectors)
{
    int ret;
    BDRVVPCState *s = bs->opaque;
    qemu_co_mutex_lock(&s->lock);
    ret = vpc_read(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static int vpc_write(BlockDriverState *bs, int64_t sector_num,
    const uint8_t *buf, int nb_sectors)
{
    BDRVVPCState *s = bs->opaque;
    int64_t offset;
    int64_t sectors, sectors_per_block;
    int ret;
    VHDFooter *footer = (VHDFooter *) s->footer_buf;

    if (be32_to_cpu(footer->type) == VHD_FIXED) {
        return bdrv_write(bs->file->bs, sector_num, buf, nb_sectors);
    }
    while (nb_sectors > 0) {
        offset = get_sector_offset(bs, sector_num, 1);

        sectors_per_block = s->block_size >> BDRV_SECTOR_BITS;
        sectors = sectors_per_block - (sector_num % sectors_per_block);
        if (sectors > nb_sectors) {
            sectors = nb_sectors;
        }

        if (offset == -1) {
            offset = alloc_block(bs, sector_num);
            if (offset < 0)
                return -1;
        }

        ret = bdrv_pwrite(bs->file->bs, offset, buf,
                          sectors * BDRV_SECTOR_SIZE);
        if (ret != sectors * BDRV_SECTOR_SIZE) {
            return -1;
        }

        nb_sectors -= sectors;
        sector_num += sectors;
        buf += sectors * BDRV_SECTOR_SIZE;
    }

    return 0;
}

static coroutine_fn int vpc_co_write(BlockDriverState *bs, int64_t sector_num,
                                     const uint8_t *buf, int nb_sectors)
{
    int ret;
    BDRVVPCState *s = bs->opaque;
    qemu_co_mutex_lock(&s->lock);
    ret = vpc_write(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static int64_t coroutine_fn vpc_co_get_block_status(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file)
{
    BDRVVPCState *s = bs->opaque;
    VHDFooter *footer = (VHDFooter*) s->footer_buf;
    int64_t start, offset;
    bool allocated;
    int n;

    if (be32_to_cpu(footer->type) == VHD_FIXED) {
        *pnum = nb_sectors;
        *file = bs->file->bs;
        return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA |
               (sector_num << BDRV_SECTOR_BITS);
    }

    offset = get_sector_offset(bs, sector_num, 0);
    start = offset;
    allocated = (offset != -1);
    *pnum = 0;

    do {
        /* All sectors in a block are contiguous (without using the bitmap) */
        n = ROUND_UP(sector_num + 1, s->block_size / BDRV_SECTOR_SIZE)
          - sector_num;
        n = MIN(n, nb_sectors);

        *pnum += n;
        sector_num += n;
        nb_sectors -= n;
        /* *pnum can't be greater than one block for allocated
         * sectors since there is always a bitmap in between. */
        if (allocated) {
            *file = bs->file->bs;
            return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | start;
        }
        if (nb_sectors == 0) {
            break;
        }
        offset = get_sector_offset(bs, sector_num, 0);
    } while (offset == -1);

    return 0;
}

/*
 * Calculates the number of cylinders, heads and sectors per cylinder
 * based on a given number of sectors. This is the algorithm described
 * in the VHD specification.
 *
 * Note that the geometry doesn't always exactly match total_sectors but
 * may round it down.
 *
 * Returns 0 on success, -EFBIG if the size is larger than ~2 TB. Override
 * the hardware EIDE and ATA-2 limit of 16 heads (max disk size of 127 GB)
 * and instead allow up to 255 heads.
 */
static int calculate_geometry(int64_t total_sectors, uint16_t* cyls,
    uint8_t* heads, uint8_t* secs_per_cyl)
{
    uint32_t cyls_times_heads;

    total_sectors = MIN(total_sectors, VHD_MAX_GEOMETRY);

    if (total_sectors >= 65535LL * 16 * 63) {
        *secs_per_cyl = 255;
        *heads = 16;
        cyls_times_heads = total_sectors / *secs_per_cyl;
    } else {
        *secs_per_cyl = 17;
        cyls_times_heads = total_sectors / *secs_per_cyl;
        *heads = (cyls_times_heads + 1023) / 1024;

        if (*heads < 4) {
            *heads = 4;
        }

        if (cyls_times_heads >= (*heads * 1024) || *heads > 16) {
            *secs_per_cyl = 31;
            *heads = 16;
            cyls_times_heads = total_sectors / *secs_per_cyl;
        }

        if (cyls_times_heads >= (*heads * 1024)) {
            *secs_per_cyl = 63;
            *heads = 16;
            cyls_times_heads = total_sectors / *secs_per_cyl;
        }
    }

    *cyls = cyls_times_heads / *heads;

    return 0;
}
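
/*
 * Worked example of the algorithm above: a 16 GiB request (33,554,432
 * sectors) ends up with secs_per_cyl = 63, heads = 16 and
 * cyls = 33,554,432 / 63 / 16 = 33,288, i.e. a usable size of
 * 33,288 * 16 * 63 = 33,554,304 sectors, which is 64 KiB short of what was
 * asked for; rounding like this is exactly why the force_size/current_size
 * paths exist.
 */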

static int create_dynamic_disk(BlockBackend *blk, uint8_t *buf,
                               int64_t total_sectors)
{
    VHDDynDiskHeader *dyndisk_header =
        (VHDDynDiskHeader *) buf;
    size_t block_size, num_bat_entries;
    int i;
    int ret;
    int64_t offset = 0;

    // Write the footer (twice: at the beginning and at the end)
    block_size = 0x200000;
    num_bat_entries = (total_sectors + block_size / 512) / (block_size / 512);
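
    /*
     * Resulting image layout (byte offsets):
     *   0          footer copy (512 bytes)
     *   512        dynamic disk header (1024 bytes)
     *   3 * 512    BAT, rounded up to a multiple of 512 bytes, all 0xFF
     *   after BAT  second footer copy, later pushed back by alloc_block()
     *              as data blocks (sector bitmap + data) are appended
     */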

    ret = blk_pwrite(blk, offset, buf, HEADER_SIZE);
    if (ret < 0) {
        goto fail;
    }

    offset = 1536 + ((num_bat_entries * 4 + 511) & ~511);
    ret = blk_pwrite(blk, offset, buf, HEADER_SIZE);
    if (ret < 0) {
        goto fail;
    }

    // Write the initial BAT
    offset = 3 * 512;

    memset(buf, 0xFF, 512);
    for (i = 0; i < (num_bat_entries * 4 + 511) / 512; i++) {
        ret = blk_pwrite(blk, offset, buf, 512);
        if (ret < 0) {
            goto fail;
        }
        offset += 512;
    }

    // Prepare the Dynamic Disk Header
    memset(buf, 0, 1024);

    memcpy(dyndisk_header->magic, "cxsparse", 8);

    /*
     * Note: The spec is actually wrong here for data_offset, it says
     * 0xFFFFFFFF, but MS tools expect all 64 bits to be set.
     */
    dyndisk_header->data_offset = cpu_to_be64(0xFFFFFFFFFFFFFFFFULL);
    dyndisk_header->table_offset = cpu_to_be64(3 * 512);
    dyndisk_header->version = cpu_to_be32(0x00010000);
    dyndisk_header->block_size = cpu_to_be32(block_size);
    dyndisk_header->max_table_entries = cpu_to_be32(num_bat_entries);

    dyndisk_header->checksum = cpu_to_be32(vpc_checksum(buf, 1024));

    // Write the header
    offset = 512;

    ret = blk_pwrite(blk, offset, buf, 1024);
    if (ret < 0) {
        goto fail;
    }

fail:
    return ret;
}

static int create_fixed_disk(BlockBackend *blk, uint8_t *buf,
                             int64_t total_size)
{
    int ret;

    /* Add footer to total size */
    total_size += HEADER_SIZE;

    ret = blk_truncate(blk, total_size);
    if (ret < 0) {
        return ret;
    }

    ret = blk_pwrite(blk, total_size - HEADER_SIZE, buf, HEADER_SIZE);
    if (ret < 0) {
        return ret;
    }

    return ret;
}
|
|
|
|
|
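/*
 * Fixed images are simply the raw guest data followed by a single footer in
 * the last 512 bytes, which is why truncating the file and writing one
 * footer copy at the end is all create_fixed_disk() has to do.
 */
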
static int vpc_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint8_t buf[1024];
    VHDFooter *footer = (VHDFooter *) buf;
    char *disk_type_param;
    int i;
    uint16_t cyls = 0;
    uint8_t heads = 0;
    uint8_t secs_per_cyl = 0;
    int64_t total_sectors;
    int64_t total_size;
    int disk_type;
    int ret = -EIO;
    bool force_size;
    Error *local_err = NULL;
    BlockBackend *blk = NULL;

    /* Read out options */
    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    disk_type_param = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT);
    if (disk_type_param) {
        if (!strcmp(disk_type_param, "dynamic")) {
            disk_type = VHD_DYNAMIC;
        } else if (!strcmp(disk_type_param, "fixed")) {
            disk_type = VHD_FIXED;
        } else {
            error_setg(errp, "Invalid disk type, %s", disk_type_param);
            ret = -EINVAL;
            goto out;
        }
    } else {
        disk_type = VHD_DYNAMIC;
    }

    force_size = qemu_opt_get_bool_del(opts, VPC_OPT_FORCE_SIZE, false);

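    /*
     * force_size (off by default) keeps total_size, and therefore the
     * footer's current_size written below, at the exact byte size that was
     * requested instead of the value implied by the rounded CHS geometry.
     * The creator_app field is set to "qem2" in that case so the deviation
     * can be recognised when the image is opened again.
     */
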
    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_PROTOCOL, &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    blk_set_allow_write_beyond_eof(blk, true);

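    /*
     * The freshly created file is still empty here; the footer, header and
     * BAT writes that follow all land beyond the current end of file, so
     * the backend must be allowed to grow the file.
     */
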
    /*
     * Calculate matching total_size and geometry. Increase the number of
     * sectors requested until we get enough (or fail). This ensures that
     * qemu-img convert doesn't truncate images, but rather rounds up.
     *
     * If the image size can't be represented by a spec conformant CHS geometry,
     * we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use
     * the image size from the VHD footer to calculate total_sectors.
     */
    if (force_size) {
        /* This will force the use of total_size for sector count, below */
        cyls = VHD_CHS_MAX_C;
        heads = VHD_CHS_MAX_H;
        secs_per_cyl = VHD_CHS_MAX_S;
    } else {
        total_sectors = MIN(VHD_MAX_GEOMETRY, total_size / BDRV_SECTOR_SIZE);
        for (i = 0; total_sectors > (int64_t)cyls * heads * secs_per_cyl; i++) {
            calculate_geometry(total_sectors + i, &cyls, &heads, &secs_per_cyl);
        }
    }

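    /*
     * VHD_MAX_GEOMETRY is the largest expressible geometry, 65535 x 16 x 255
     * sectors (roughly 127 GiB).  Reaching it here means force_size was
     * requested or the image is too large for CHS, so the byte size from the
     * options is kept and only checked against VHD_MAX_SECTORS.
     */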
    if ((int64_t)cyls * heads * secs_per_cyl == VHD_MAX_GEOMETRY) {
        total_sectors = total_size / BDRV_SECTOR_SIZE;
        /* Allow a maximum disk size of approximately 2 TB */
        if (total_sectors > VHD_MAX_SECTORS) {
            error_setg(errp, "Disk size is too large, max size is 2040 GiB");
            ret = -EFBIG;
            goto out;
        }
    } else {
        total_sectors = (int64_t)cyls * heads * secs_per_cyl;
        total_size = total_sectors * BDRV_SECTOR_SIZE;
    }

    /* Prepare the Hard Disk Footer */
    memset(buf, 0, 1024);

    memcpy(footer->creator, "conectix", 8);
    if (force_size) {
        memcpy(footer->creator_app, "qem2", 4);
    } else {
        memcpy(footer->creator_app, "qemu", 4);
    }
    memcpy(footer->creator_os, "Wi2k", 4);

    footer->features = cpu_to_be32(0x02);
    footer->version = cpu_to_be32(0x00010000);
    if (disk_type == VHD_DYNAMIC) {
        footer->data_offset = cpu_to_be64(HEADER_SIZE);
    } else {
        footer->data_offset = cpu_to_be64(0xFFFFFFFFFFFFFFFFULL);
    }
    footer->timestamp = cpu_to_be32(time(NULL) - VHD_TIMESTAMP_BASE);

    /* Version of Virtual PC 2007 */
    footer->major = cpu_to_be16(0x0005);
    footer->minor = cpu_to_be16(0x0003);
    footer->orig_size = cpu_to_be64(total_size);
    footer->current_size = cpu_to_be64(total_size);
    footer->cyls = cpu_to_be16(cyls);
    footer->heads = heads;
    footer->secs_per_cyl = secs_per_cyl;

    footer->type = cpu_to_be32(disk_type);

#if defined(CONFIG_UUID)
    uuid_generate(footer->uuid);
#endif

    footer->checksum = cpu_to_be32(vpc_checksum(buf, HEADER_SIZE));

    if (disk_type == VHD_DYNAMIC) {
        ret = create_dynamic_disk(blk, buf, total_sectors);
    } else {
        ret = create_fixed_disk(blk, buf, total_size);
    }
    if (ret < 0) {
        error_setg(errp, "Unable to create or write VHD header");
    }

out:
    blk_unref(blk);
    g_free(disk_type_param);
    return ret;
}

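/*
 * Dynamic images can report zero initialisation unconditionally: blocks that
 * were never allocated have no BAT entry and read back as zeroes.  Fixed
 * images are a flat file, so they inherit whatever the underlying protocol
 * driver guarantees.
 */
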
static int vpc_has_zero_init(BlockDriverState *bs)
{
    BDRVVPCState *s = bs->opaque;
    VHDFooter *footer = (VHDFooter *) s->footer_buf;

    if (be32_to_cpu(footer->type) == VHD_FIXED) {
        return bdrv_has_zero_init(bs->file->bs);
    } else {
        return 1;
    }
}

static void vpc_close(BlockDriverState *bs)
{
    BDRVVPCState *s = bs->opaque;
    qemu_vfree(s->pagetable);
#ifdef CACHE
    g_free(s->pageentry_u8);
#endif

    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);
}

static QemuOptsList vpc_create_opts = {
    .name = "vpc-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(vpc_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_SUBFMT,
            .type = QEMU_OPT_STRING,
            .help =
                "Type of virtual hard disk format. Supported formats are "
                "{dynamic (default) | fixed} "
        },
        {
            .name = VPC_OPT_FORCE_SIZE,
            .type = QEMU_OPT_BOOL,
            .help = "Force disk size calculation to use the actual size "
                    "specified, rather than using the nearest CHS-based "
                    "calculation"
        },
        { /* end of list */ }
    }
};

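/*
 * Illustrative invocations, assuming the usual "subformat" and "force_size"
 * option keys behind BLOCK_OPT_SUBFMT and VPC_OPT_FORCE_SIZE:
 *
 *   qemu-img create -f vpc -o subformat=dynamic disk.vhd 10G
 *   qemu-img create -f vpc -o subformat=fixed,force_size=on disk.vhd 10G
 *   qemu-img convert -f raw -o force_size -O vpc test.img test.vhd
 */
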
static BlockDriver bdrv_vpc = {
    .format_name    = "vpc",
    .instance_size  = sizeof(BDRVVPCState),

    .bdrv_probe             = vpc_probe,
    .bdrv_open              = vpc_open,
    .bdrv_close             = vpc_close,
    .bdrv_reopen_prepare    = vpc_reopen_prepare,
    .bdrv_create            = vpc_create,

    .bdrv_read                  = vpc_co_read,
    .bdrv_write                 = vpc_co_write,
    .bdrv_co_get_block_status   = vpc_co_get_block_status,

    .bdrv_get_info          = vpc_get_info,

    .create_opts            = &vpc_create_opts,
    .bdrv_has_zero_init     = vpc_has_zero_init,
};

static void bdrv_vpc_init(void)
{
    bdrv_register(&bdrv_vpc);
}

block_init(bdrv_vpc_init);