Signed-off-by: Fiona Ebner <[email protected]>
---
src/PVE/API2/Qemu.pm | 6 ++++--
src/PVE/QemuMigrate.pm | 4 ++--
src/PVE/QemuServer/BlockJob.pm | 16 ++++++++--------
3 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/src/PVE/API2/Qemu.pm b/src/PVE/API2/Qemu.pm
index 190878de..5a627936 100644
--- a/src/PVE/API2/Qemu.pm
+++ b/src/PVE/API2/Qemu.pm
@@ -35,7 +35,7 @@ use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive qw(checked_volume_format checked_parse_volname);
use PVE::QemuServer::Helpers;
use PVE::QemuServer::ImportDisk;
-use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Monitor qw(mon_cmd vm_qmp_peer);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Memory qw(get_current_memory);
use PVE::QemuServer::MetaInfo;
@@ -4607,7 +4607,9 @@ __PACKAGE__->register_method({
PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
};
if (my $err = $@) {
- eval { PVE::QemuServer::BlockJob::qemu_blockjobs_cancel($vmid, $jobs) };
+ eval {
+ PVE::QemuServer::BlockJob::qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs);
+ };
sleep 1; # some storage like rbd need to wait before release volume - really?
foreach my $volid (@$newvollist) {
diff --git a/src/PVE/QemuMigrate.pm b/src/PVE/QemuMigrate.pm
index 8fa84080..b7aba504 100644
--- a/src/PVE/QemuMigrate.pm
+++ b/src/PVE/QemuMigrate.pm
@@ -33,7 +33,7 @@ use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive qw(checked_volume_format);
use PVE::QemuServer::Helpers qw(min_version);
use PVE::QemuServer::Machine;
-use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Monitor qw(mon_cmd vm_qmp_peer);
use PVE::QemuServer::Memory qw(get_current_memory);
use PVE::QemuServer::Network;
use PVE::QemuServer::QMPHelpers;
@@ -1592,7 +1592,7 @@ sub phase2_cleanup {
if ($self->{storage_migration}) {
eval {
PVE::QemuServer::BlockJob::qemu_blockjobs_cancel(
- $vmid,
+ vm_qmp_peer($vmid),
$self->{storage_migration_jobs},
);
};
diff --git a/src/PVE/QemuServer/BlockJob.pm b/src/PVE/QemuServer/BlockJob.pm
index 33ff66bc..49bb13c7 100644
--- a/src/PVE/QemuServer/BlockJob.pm
+++ b/src/PVE/QemuServer/BlockJob.pm
@@ -42,16 +42,16 @@ sub qemu_handle_concluded_blockjob {
}
sub qemu_blockjobs_cancel {
- my ($vmid, $jobs) = @_;
+ my ($qmp_peer, $jobs) = @_;
foreach my $job (keys %$jobs) {
print "$job: Cancelling block job\n";
- eval { mon_cmd($vmid, "block-job-cancel", device => $job); };
+ eval { qmp_cmd($qmp_peer, "block-job-cancel", device => $job); };
$jobs->{$job}->{cancel} = 1;
}
while (1) {
- my $stats = mon_cmd($vmid, "query-block-jobs");
+ my $stats = qmp_cmd($qmp_peer, "query-block-jobs");
my $running_jobs = {};
foreach my $stat (@$stats) {
@@ -61,7 +61,7 @@ sub qemu_blockjobs_cancel {
foreach my $job (keys %$jobs) {
my $info = $running_jobs->{$job};
eval {
- qemu_handle_concluded_blockjob(vm_qmp_peer($vmid), $job, $info, $jobs->{$job})
+ qemu_handle_concluded_blockjob($qmp_peer, $job, $info, $jobs->{$job})
if $info && $info->{status} eq 'concluded';
};
log_warn($@) if $@; # only warn and proceed with canceling other jobs
@@ -177,7 +177,7 @@ sub qemu_drive_mirror_monitor {
}
# if we clone a disk for a new target vm, we don't switch the disk
- qemu_blockjobs_cancel($vmid, $jobs);
+ qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs);
if ($agent_running) {
print "unfreeze filesystem\n";
@@ -234,7 +234,7 @@ sub qemu_drive_mirror_monitor {
my $err = $@;
if ($err) {
- eval { qemu_blockjobs_cancel($vmid, $jobs) };
+ eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
die "block job ($op) error: $err";
}
}
@@ -308,7 +308,7 @@ sub qemu_drive_mirror {
# if a job already runs for this device we get an error, catch it for cleanup
eval { mon_cmd($vmid, "drive-mirror", %$opts); };
if (my $err = $@) {
- eval { qemu_blockjobs_cancel($vmid, $jobs) };
+ eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
warn "$@\n" if $@;
die "mirroring error: $err\n";
}
@@ -503,7 +503,7 @@ sub blockdev_mirror {
# if a job already runs for this device we get an error, catch it for cleanup
eval { mon_cmd($vmid, "blockdev-mirror", $qmp_opts->%*); };
if (my $err = $@) {
- eval { qemu_blockjobs_cancel($vmid, $jobs) };
+ eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
log_warn("unable to cancel block jobs - $@");
eval { PVE::QemuServer::Blockdev::detach(vm_qmp_peer($vmid), $target_node_name); };
log_warn("unable to delete blockdev '$target_node_name' - $@");
--
2.47.3
_______________________________________________
pve-devel mailing list
[email protected]
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel