Separate vmdk_open by subformats into: * vmdk_open_vmdk3 * vmdk_open_vmdk4 Signed-off-by: Fam Zheng <famc...@gmail.com> --- block/vmdk.c | 255 ++++++++++++++++++++++++++++++++++++++++------------------ 1 files changed, 177 insertions(+), 78 deletions(-)
diff --git a/block/vmdk.c b/block/vmdk.c index 0517fdf..55b1e2b 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -401,84 +401,26 @@ static int vmdk_parent_open(BlockDriverState *bs) return 0; } -static int vmdk_open(BlockDriverState *bs, int flags) +static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent) { - BDRVVmdkState *s = bs->opaque; - uint32_t magic; + int ret; int l1_size, i; - VmdkExtent *extent = NULL; - - if (bdrv_pread(bs->file, 0, &magic, sizeof(magic)) != sizeof(magic)) - goto fail; - - magic = be32_to_cpu(magic); - if (magic == VMDK3_MAGIC) { - VMDK3Header header; - s->extents = qemu_mallocz(sizeof(VmdkExtent)); - s->num_extents = 1; - if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header)) - != sizeof(header)) { - goto fail; - } - extent = s->extents; - extent->flat = false; - extent->file = bs->file; - extent->cluster_sectors = le32_to_cpu(header.granularity); - extent->l2_size = 1 << 9; - extent->l1_size = 1 << 6; - extent->sectors = le32_to_cpu(header.disk_sectors); - extent->end_sector = le32_to_cpu(header.disk_sectors); - extent->l1_table_offset = le32_to_cpu(header.l1dir_offset) << 9; - extent->l1_backup_table_offset = 0; - extent->l1_entry_sectors = extent->l2_size * extent->cluster_sectors; - } else if (magic == VMDK4_MAGIC) { - VMDK4Header header; - s->extents = qemu_mallocz(sizeof(VmdkExtent)); - s->num_extents = 1; - if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header)) - != sizeof(header)) { - goto fail; - } - extent = s->extents; - extent->file = bs->file; - extent->sectors = le64_to_cpu(header.capacity); - extent->end_sector = le64_to_cpu(header.capacity); - extent->cluster_sectors = le64_to_cpu(header.granularity); - extent->l2_size = le32_to_cpu(header.num_gtes_per_gte); - extent->l1_entry_sectors = extent->l2_size * extent->cluster_sectors; - if (extent->l1_entry_sectors <= 0) { - goto fail; - } - extent->l1_size = (extent->sectors + extent->l1_entry_sectors - 1) - / extent->l1_entry_sectors; - 
extent->l1_table_offset = le64_to_cpu(header.rgd_offset) << 9; - extent->l1_backup_table_offset = le64_to_cpu(header.gd_offset) << 9; - - // try to open parent images, if exist - if (vmdk_parent_open(bs) != 0) - goto fail; - // write the CID once after the image creation - s->parent_cid = vmdk_read_cid(bs,1); - } else { - goto fail; - } - - /* sum up the total sectors */ - bs->total_sectors = 0; - for (i = 0; i < s->num_extents; i++) { - bs->total_sectors += s->extents[i].sectors; - } /* read the L1 table */ l1_size = extent->l1_size * sizeof(uint32_t); extent->l1_table = qemu_malloc(l1_size); - if (bdrv_pread(bs->file, - extent->l1_table_offset, - extent->l1_table, - l1_size) - != l1_size) { + if (!extent->l1_table) { + ret = -ENOMEM; goto fail; } + ret = bdrv_pread(bs->file, + extent->l1_table_offset, + extent->l1_table, + l1_size); + if (ret != l1_size) { + ret = ret < 0 ? ret : -EIO; + goto fail_l1; + } for (i = 0; i < extent->l1_size; i++) { le32_to_cpus(&extent->l1_table[i]); } @@ -493,21 +435,178 @@ static int vmdk_open(BlockDriverState *bs, int flags) goto fail; } for (i = 0; i < extent->l1_size; i++) { + if (!extent->l1_backup_table) { + ret = -ENOMEM; + goto fail_l1; + } + } + ret = bdrv_pread(bs->file, + extent->l1_backup_table_offset, + extent->l1_backup_table, + l1_size); + if (ret != l1_size) { + ret = ret < 0 ? ret : -EIO; + goto fail; + } + for (i = 0; i < extent->l1_size; i++) { le32_to_cpus(&extent->l1_backup_table[i]); } } extent->l2_cache = qemu_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t)); + if (!extent->l2_cache) { + ret = -ENOMEM; + goto fail_l1b; + } return 0; + fail_l1b: + qemu_free(extent->l1_backup_table); + fail_l1: + qemu_free(extent->l1_table); fail: - for (i = 0; i < s->num_extents; i++) { - qemu_free(s->extents[i].l1_backup_table); - qemu_free(s->extents[i].l1_table); - qemu_free(s->extents[i].l2_cache); + return ret; +} + +/* Create and append extent to the extent array. Return the added VmdkExtent + * address. 
Returns -ENOMEM if allocation failed. */ +static int vmdk_add_extent(BlockDriverState *bs, + BlockDriverState *file, bool flat, int64_t sectors, + int64_t l1_offset, int64_t l1_backup_offset, + uint32_t l1_size, + int l2_size, unsigned int cluster_sectors, + VmdkExtent **new_extent) +{ + VmdkExtent *extent, *p; + BDRVVmdkState *s = bs->opaque; + + p = qemu_realloc(s->extents, (s->num_extents + 1) * sizeof(VmdkExtent)); + if (!p) { + return -ENOMEM; + } + s->extents = p; + extent = &s->extents[s->num_extents]; + s->num_extents++; + + memset(extent, 0, sizeof(VmdkExtent)); + extent->file = file; + extent->flat = flat; + extent->sectors = sectors; + extent->l1_table_offset = l1_offset; + extent->l1_backup_table_offset = l1_backup_offset; + extent->l1_size = l1_size; + extent->l1_entry_sectors = l2_size * cluster_sectors; + extent->l2_size = l2_size; + extent->cluster_sectors = cluster_sectors; + + if (s->num_extents > 1) { + extent->end_sector = (*(extent - 1)).end_sector + extent->sectors; + } else { + extent->end_sector = extent->sectors; + } + bs->total_sectors = extent->end_sector; + if (new_extent) { + *new_extent = extent; + } + return 0; +} + + +static int vmdk_open_vmdk3(BlockDriverState *bs, int flags) +{ + int ret; + uint32_t magic; + VMDK3Header header; + VmdkExtent *extent; + BDRVVmdkState *s = bs->opaque; + + ret = bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header)); + if (ret != sizeof(header)) { + ret = ret < 0 ? 
ret : -EIO; + goto fail; } + ret = vmdk_add_extent(bs, bs->file, false, le32_to_cpu(header.disk_sectors), + le32_to_cpu(header.l1dir_offset) << 9, 0, + 1 << 6, 1 << 9, le32_to_cpu(header.granularity), + &extent); + if (ret) { + goto fail; + } + ret = vmdk_init_tables(bs, extent); + if (ret) { + return ret; + } + return 0; + fail: qemu_free(s->extents); - return -1; + return ret; +} + +static int vmdk_open_vmdk4(BlockDriverState *bs, int flags) +{ + int ret; + uint32_t magic; + uint32_t l1_size, l1_entry_sectors; + VMDK4Header header; + BDRVVmdkState *s = bs->opaque; + VmdkExtent *extent; + + ret = bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header)); + if (ret != sizeof(header)) { + ret = ret < 0 ? ret : -EIO; + goto fail; + } + l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gte) + * le64_to_cpu(header.granularity); + l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1) + / l1_entry_sectors; + ret = vmdk_add_extent(bs, bs->file, false, + le64_to_cpu(header.capacity), + le64_to_cpu(header.rgd_offset) << 9, + le64_to_cpu(header.gd_offset) << 9, + l1_size, + le32_to_cpu(header.num_gtes_per_gte), + le64_to_cpu(header.granularity), + &extent); + if (ret) { + goto fail; + } + if (extent->l1_entry_sectors <= 0) { + ret = -EINVAL; + goto fail; + } + /* try to open parent images, if exist */ + ret = vmdk_parent_open(bs); + if (ret) { + goto fail; + } + s->parent_cid = vmdk_read_cid(bs, 1); + ret = vmdk_init_tables(bs, extent); + if (ret) { + goto fail; + } + return 0; + fail: + qemu_free(s->extents); + return ret; +} + +static int vmdk_open(BlockDriverState *bs, int flags) +{ + uint32_t magic; + + if (bdrv_pread(bs->file, 0, &magic, sizeof(magic)) != sizeof(magic)) { + return -EIO; + } + + magic = be32_to_cpu(magic); + if (magic == VMDK3_MAGIC) { + return vmdk_open_vmdk3(bs, flags); + } else if (magic == VMDK4_MAGIC) { + return vmdk_open_vmdk4(bs, flags); + } else { + return -EINVAL; + } } static int get_whole_cluster(BlockDriverState *bs, @@ -594,11 
+693,11 @@ static uint64_t get_cluster_offset(BlockDriverState *bs, if (!l2_offset) { return 0; } - for(i = 0; i < L2_CACHE_SIZE; i++) { + for (i = 0; i < L2_CACHE_SIZE; i++) { if (l2_offset == extent->l2_cache_offsets[i]) { /* increment the hit count */ if (++extent->l2_cache_counts[i] == 0xffffffff) { - for(j = 0; j < L2_CACHE_SIZE; j++) { + for (j = 0; j < L2_CACHE_SIZE; j++) { extent->l2_cache_counts[j] >>= 1; } } @@ -609,7 +708,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs, /* not found: load a new entry in the least used one */ min_index = 0; min_count = 0xffffffff; - for(i = 0; i < L2_CACHE_SIZE; i++) { + for (i = 0; i < L2_CACHE_SIZE; i++) { if (extent->l2_cache_counts[i] < min_count) { min_count = extent->l2_cache_counts[i]; min_index = i;