Currently the code uses only the top qcow2 delta to handle
decompression. Each image's compression type is already set in its
qcow2 header when the image is loaded, and images in a chain may use
different types.

To support such mixed compression, switch to qio->qcow2 and its
respective qcow2 header, so decompression is performed with the
correct type.

Where possible, remove the qcow2 argument from the functions called
during decompression and use qio->qcow2 instead: it is the qcow2
image the qio belongs to, since a qio references exactly one image.
https://virtuozzo.atlassian.net/browse/VSTOR-97155
Signed-off-by: Alexander Atanasov <alexander.atana...@virtuozzo.com>
---
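A note for reviewers, not intended for the commit: the per-image
sizing rule the new loop relies on can be summarized as below. This is
a minimal sketch; dctx_workspace_bound() is a hypothetical name and
does not exist in the driver, which open-codes this logic in
process_compressed_read():

#include <linux/zlib.h>		/* zlib_inflate_workspacesize() */
#include <linux/zstd.h>		/* zstd_dstream_workspace_bound() */

/*
 * Hypothetical helper: size the decompression workspace for one image
 * from the compression type recorded in its qcow2 header. The zstd
 * bound depends on the cluster size, while zlib's inflate workspace
 * size is a build-time constant, so images in a mixed chain may need
 * differently sized workspaces.
 */
static size_t dctx_workspace_bound(const struct qcow2 *qcow2)
{
	if (qcow2->hdr.compression_type == QCOW2_COMPRESSION_TYPE_ZSTD)
		return zstd_dstream_workspace_bound(qcow2->clu_size);
	return zlib_inflate_workspacesize();
}

Because this bound can differ between images, the loop reallocates the
shared buffer only when a qio's image needs a larger workspace than is
currently allocated, and otherwise reuses it, resetting the zstd
dstream when consecutive qios come from the same image.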
drivers/md/dm-qcow2-map.c | 74 ++++++++++++++++++++++++---------------
1 file changed, 46 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-qcow2-map.c b/drivers/md/dm-qcow2-map.c
index ccc8d4484049..969f591eb0a1 100644
--- a/drivers/md/dm-qcow2-map.c
+++ b/drivers/md/dm-qcow2-map.c
@@ -3009,8 +3009,9 @@ static int copy_buf_to_bvec_iter(const struct bio_vec *bvec,
 	return ret;
 }
 
-static int copy_clu_part_to_qio(struct qcow2 *qcow2, const void *buf, struct qio *qio)
+static int copy_clu_part_to_qio(const void *buf, struct qio *qio)
 {
+	struct qcow2 *qcow2 = qio->qcow2;
 	u32 max, seek, clu_size = qcow2->clu_size;
 
 	seek = bytes_off_in_cluster(qcow2, qio);
@@ -3047,7 +3048,7 @@ static int copy_zcow_slice(loff_t start, loff_t end, void *qio_p,
 	return copy_buf_to_bvec_iter(bvec, &iter, buf + off, clu_size - off);
 }
 
-static int prepare_zcow_slices(struct qcow2 *qcow2, void *buf, struct qio *qio)
+static int prepare_zcow_slices(void *buf, struct qio *qio)
 {
 	loff_t consumed = 0;
 	/* Place required slices in that pages like further COW expects */
@@ -3655,46 +3656,63 @@ static int complete_metadata_writeback(struct qcow2 *qcow2)
 }
 
 /* Process completed compressed READs */
-static void process_compressed_read(struct qcow2 *qcow2, struct list_head *read_list,
+static void process_compressed_read(struct list_head *read_list,
 				    struct list_head *cow_list)
 {
+	struct qcow2 *qcow2, *qcow2_prev = NULL;
 	struct qcow2_bvec *qvec;
 	struct qio_ext *ext;
 	int ret;
 	void *buf = NULL, *arg;
 	struct qio *qio;
 	bool for_cow;
+	size_t dctxlen_alloced = 0;
 	size_t dctxlen;
 
 	if (list_empty(read_list))
 		return;
 
-	if (qcow2->hdr.compression_type == QCOW2_COMPRESSION_TYPE_ZSTD)
-		dctxlen = zstd_dstream_workspace_bound(qcow2->clu_size);
-	else
-		dctxlen = zlib_inflate_workspacesize();
-
-
-	buf = kvmalloc(qcow2->clu_size + dctxlen, GFP_NOIO);
-	if (!buf) {
-		end_qios(read_list, BLK_STS_RESOURCE);
-		return;
-	}
-
-	if (qcow2->hdr.compression_type == QCOW2_COMPRESSION_TYPE_ZSTD) {
-		arg = zstd_init_dstream(qcow2->clu_size, buf + qcow2->clu_size, dctxlen);
-		if (!arg) {
-			end_qios(read_list, BLK_STS_RESOURCE);
-			kvfree(buf);
-			return;
-		}
-	} else {
-		arg = buf + qcow2->clu_size;
-	}
 	while ((qio = qio_list_pop(read_list)) != NULL) {
 		qvec = qio->data;
 		ext = qio->ext;
+		qcow2 = qio->qcow2;
+
+		if (!qcow2_prev || qcow2_prev != qcow2) {
+			if (qcow2->hdr.compression_type == QCOW2_COMPRESSION_TYPE_ZSTD)
+				dctxlen = zstd_dstream_workspace_bound(qcow2->clu_size);
+			else
+				dctxlen = zlib_inflate_workspacesize();
+
+			if (dctxlen_alloced < dctxlen) {
+				if (buf)
+					kvfree(buf);
+				buf = kvmalloc(qcow2->clu_size + dctxlen, GFP_NOIO);
+				if (!buf) {
+					QC_ERR(qcow2->tgt->ti, "cannot allocate decompression buffer: %zu",
+					       qcow2->clu_size + dctxlen);
+					end_qios(read_list, BLK_STS_RESOURCE);
+					return;
+				}
+				dctxlen_alloced = dctxlen;
+			}
+
+			if (qcow2->hdr.compression_type == QCOW2_COMPRESSION_TYPE_ZSTD) {
+				arg = zstd_init_dstream(qcow2->clu_size, buf + qcow2->clu_size, dctxlen);
+				if (!arg) {
+					end_qios(read_list, BLK_STS_RESOURCE);
+					kvfree(buf);
+					return;
+				}
+			} else {
+				arg = buf + qcow2->clu_size;
+			}
+		} else {
+			if (qcow2->hdr.compression_type == QCOW2_COMPRESSION_TYPE_ZSTD) {
+				zstd_reset_dstream(arg);