QMP and monitor helpers are moved from QemuServer.pm.

By using only vm_running_locally instead of check_running, a cyclic
dependency to QemuConfig is avoided. This also means that the $nocheck
parameter no longer serves a purpose, so it has been removed along with
vm_mon_cmd_nocheck.

Care has been taken to avoid errors resulting from
this, and occasionally a manual check for a VM's existence was inserted
at the call site.

Methods have been renamed to avoid redundant naming:
* vm_qmp_command -> qmp_cmd
* vm_mon_cmd -> mon_cmd
* vm_human_monitor_command -> hmp_cmd

mon_cmd is exported since it has many users. This patch also changes all
non-package users of vm_qmp_command to use the mon_cmd helper. Includes
mocking for tests.

Signed-off-by: Stefan Reiter <s.rei...@proxmox.com>
---

Sorry for the long patch, but almost all changes are just callers of
mon_cmd/qmp_cmd being renamed.


 PVE/API2/Qemu.pm         |  15 ++--
 PVE/API2/Qemu/Agent.pm   |   7 +-
 PVE/CLI/qm.pm            |  13 +--
 PVE/QemuConfig.pm        |  15 ++--
 PVE/QemuMigrate.pm       |  21 ++---
 PVE/QemuServer.pm        | 184 +++++++++++++--------------------------
 PVE/QemuServer/Agent.pm  |   3 +-
 PVE/QemuServer/Makefile  |   1 +
 PVE/QemuServer/Memory.pm |   9 +-
 PVE/VZDump/QemuServer.pm |  13 +--
 test/snapshot-test.pm    |  19 ++--
 11 files changed, 130 insertions(+), 170 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index b2c0b0d..b828e08 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -20,6 +20,7 @@ use PVE::ReplicationConfig;
 use PVE::GuestHelpers;
 use PVE::QemuConfig;
 use PVE::QemuServer;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuMigrate;
 use PVE::RPCEnvironment;
 use PVE::AccessControl;
@@ -1835,8 +1836,8 @@ __PACKAGE__->register_method({
        my ($ticket, undef, $remote_viewer_config) =
            PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, 
$proxy, $title, $port);
 
-       PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', 
password => $ticket);
-       PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 
'spice', time => "+30");
+       mon_cmd($vmid, "set_password", protocol => 'spice', password => 
$ticket);
+       mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
 
        return $remote_viewer_config;
     }});
@@ -2261,7 +2262,8 @@ __PACKAGE__->register_method({
        # checking the qmp status here to get feedback to the gui/cli/api
        # and the status query should not take too long
        my $qmpstatus = eval {
-           PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" 
}, 0);
+           PVE::QemuConfig::vm_exists_on_node($vmid);
+           mon_cmd($vmid, "query-status");
        };
        my $err = $@ if $@;
 
@@ -2341,7 +2343,8 @@ __PACKAGE__->register_method({
        my $vmid = extract_param($param, 'vmid');
 
        my $qmpstatus = eval {
-           PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" 
}, 0);
+           PVE::QemuConfig::vm_exists_on_node($vmid);
+           mon_cmd($vmid, "query-status");
        };
        my $err = $@ if $@;
 
@@ -3093,7 +3096,7 @@ __PACKAGE__->register_method({
                    PVE::QemuConfig->write_config($vmid, $conf);
 
                    if ($running && 
PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && 
PVE::QemuServer::qga_check_running($vmid)) {
-                       eval { PVE::QemuServer::vm_mon_cmd($vmid, 
"guest-fstrim"); };
+                       eval { mon_cmd($vmid, "guest-fstrim"); };
                    }
 
                    eval {
@@ -3449,7 +3452,7 @@ __PACKAGE__->register_method({
 
        my $res = '';
        eval {
-           $res = PVE::QemuServer::vm_human_monitor_command($vmid, 
$param->{command});
+           $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, $param->{command});
        };
        $res = "ERROR: $@" if $@;
 
diff --git a/PVE/API2/Qemu/Agent.pm b/PVE/API2/Qemu/Agent.pm
index 839146c..6792825 100644
--- a/PVE/API2/Qemu/Agent.pm
+++ b/PVE/API2/Qemu/Agent.pm
@@ -7,6 +7,7 @@ use PVE::RESTHandler;
 use PVE::JSONSchema qw(get_standard_option);
 use PVE::QemuServer;
 use PVE::QemuServer::Agent qw(agent_available agent_cmd check_agent_error);
+use PVE::QemuServer::Monitor qw(mon_cmd);
 use MIME::Base64 qw(encode_base64 decode_base64);
 use JSON;
 
@@ -190,7 +191,7 @@ sub register_command {
            agent_available($vmid, $conf);
 
            my $cmd = $param->{command} // $command;
-           my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-$cmd");
+           my $res = mon_cmd($vmid, "guest-$cmd");
 
            return { result => $res };
        }});
@@ -415,7 +416,7 @@ __PACKAGE__->register_method({
        my $content = "";
 
        while ($bytes_left > 0 && !$eof) {
-           my $read = PVE::QemuServer::vm_mon_cmd($vmid, "guest-file-read", 
handle => $qgafh, count => int($read_size));
+           my $read = mon_cmd($vmid, "guest-file-read", handle => $qgafh, 
count => int($read_size));
            check_agent_error($read, "can't read from file");
 
            $content .= decode_base64($read->{'buf-b64'});
@@ -423,7 +424,7 @@ __PACKAGE__->register_method({
            $eof = $read->{eof} // 0;
        }
 
-       my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-file-close", handle 
=> $qgafh);
+       my $res = mon_cmd($vmid, "guest-file-close", handle => $qgafh);
        check_agent_error($res, "can't close file", 1);
 
        my $result = {
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index 1a841b7..61ffa1d 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -27,9 +27,11 @@ use PVE::Tools qw(extract_param);
 
 use PVE::API2::Qemu::Agent;
 use PVE::API2::Qemu;
+use PVE::QemuConfig;
 use PVE::QemuServer::Helpers;
 use PVE::QemuServer::Agent qw(agent_available);
 use PVE::QemuServer::ImportDisk;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuServer::OVF;
 use PVE::QemuServer;
 
@@ -210,15 +212,16 @@ __PACKAGE__->register_method ({
        my ($param) = @_;
 
        my $vmid = $param->{vmid};
+       PVE::QemuConfig::vm_exists_on_node($vmid);
        my $vnc_socket = PVE::QemuServer::Helpers::vnc_socket($vmid);
 
        if (my $ticket = $ENV{LC_PVE_TICKET}) {  # NOTE: ssh on debian only 
pass LC_* variables
-           PVE::QemuServer::vm_mon_cmd($vmid, "change", device => 'vnc', 
target => "unix:$vnc_socket,password");
-           PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 
'vnc', password => $ticket);
-           PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 
'vnc', time => "+30");
+           mon_cmd($vmid, "change", device => 'vnc', target => 
"unix:$vnc_socket,password");
+           mon_cmd($vmid, "set_password", protocol => 'vnc', password => 
$ticket);
+           mon_cmd($vmid, "expire_password", protocol => 'vnc', time => "+30");
        } else {
            # FIXME: remove or allow to add tls-creds object, as x509 vnc param 
is removed with qemu 4??
-           PVE::QemuServer::vm_mon_cmd($vmid, "change", device => 'vnc', 
target => "unix:$vnc_socket,password");
+           mon_cmd($vmid, "change", device => 'vnc', target => 
"unix:$vnc_socket,password");
        }
 
        run_vnc_proxy($vnc_socket);
@@ -398,7 +401,7 @@ __PACKAGE__->register_method ({
            last if $input =~ m/^\s*q(uit)?\s*$/;
 
            eval {
-               print PVE::QemuServer::vm_human_monitor_command ($vmid, $input);
+               print PVE::QemuServer::Monitor::hmp_cmd($vmid, $input);
            };
            print "ERROR: $@" if $@;
        }
diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm
index bca2725..e2e722d 100644
--- a/PVE/QemuConfig.pm
+++ b/PVE/QemuConfig.pm
@@ -6,6 +6,7 @@ use warnings;
 use PVE::AbstractConfig;
 use PVE::INotify;
 use PVE::QemuServer::Helpers;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuServer;
 use PVE::Storage;
 use PVE::Tools;
@@ -193,10 +194,10 @@ sub __snapshot_freeze {
     my ($class, $vmid, $unfreeze) = @_;
 
     if ($unfreeze) {
-       eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); };
+       eval { mon_cmd($vmid, "guest-fsfreeze-thaw"); };
        warn "guest-fsfreeze-thaw problems - $@" if $@;
     } else {
-       eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); };
+       eval { mon_cmd($vmid, "guest-fsfreeze-freeze"); };
        warn "guest-fsfreeze-freeze problems - $@" if $@;
     }
 }
@@ -212,9 +213,9 @@ sub __snapshot_create_vol_snapshots_hook {
                my $path = PVE::Storage::path($storecfg, $snap->{vmstate});
                PVE::Storage::activate_volumes($storecfg, [$snap->{vmstate}]);
 
-               PVE::QemuServer::vm_mon_cmd($vmid, "savevm-start", statefile => 
$path);
+               mon_cmd($vmid, "savevm-start", statefile => $path);
                for(;;) {
-                   my $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 
"query-savevm");
+                   my $stat = mon_cmd($vmid, "query-savevm");
                    if (!$stat->{status}) {
                        die "savevm not active\n";
                    } elsif ($stat->{status} eq 'active') {
@@ -227,18 +228,18 @@ sub __snapshot_create_vol_snapshots_hook {
                    }
                }
            } else {
-               PVE::QemuServer::vm_mon_cmd($vmid, "savevm-start");
+               mon_cmd($vmid, "savevm-start");
            }
        } elsif ($hook eq "after") {
            eval {
-               PVE::QemuServer::vm_mon_cmd($vmid, "savevm-end");
+               mon_cmd($vmid, "savevm-end");
                PVE::Storage::deactivate_volumes($storecfg, [$snap->{vmstate}]) 
if $snap->{vmstate};
            };
            warn $@ if $@;
        } elsif ($hook eq "after-freeze") {
            # savevm-end is async, we need to wait
            for (;;) {
-               my $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 
"query-savevm");
+               my $stat = mon_cmd($vmid, "query-savevm");
                if (!$stat->{bytes}) {
                    last;
                } else {
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 626b837..ce06af0 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -11,6 +11,7 @@ use PVE::Tools;
 use PVE::Cluster;
 use PVE::Storage;
 use PVE::QemuServer;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 use Time::HiRes qw( usleep );
 use PVE::RPCEnvironment;
 use PVE::ReplicationConfig;
@@ -551,7 +552,7 @@ sub phase2 {
 
     my $spice_ticket;
     if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) {
-       my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice');
+       my $res = mon_cmd($vmid, 'query-spice');
        $spice_ticket = $res->{ticket};
     }
 
@@ -706,7 +707,7 @@ sub phase2 {
     $migrate_speed *= 1024;
     $self->log('info', "migrate_set_speed: $migrate_speed");
     eval {
-        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value 
=> int($migrate_speed));
+       mon_cmd($vmid, "migrate_set_speed", value => int($migrate_speed));
     };
     $self->log('info', "migrate_set_speed error: $@") if $@;
 
@@ -715,7 +716,7 @@ sub phase2 {
     if (defined($migrate_downtime)) {
        $self->log('info', "migrate_set_downtime: $migrate_downtime");
        eval {
-           PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", 
value => int($migrate_downtime*100)/100);
+           mon_cmd($vmid, "migrate_set_downtime", value => 
int($migrate_downtime*100)/100);
        };
        $self->log('info', "migrate_set_downtime error: $@") if $@;
     }
@@ -733,7 +734,7 @@ sub phase2 {
 
     $self->log('info', "set cachesize: $cachesize");
     eval {
-       PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", 
value => int($cachesize));
+       mon_cmd($vmid, "migrate-set-cache-size", value => int($cachesize));
     };
     $self->log('info', "migrate-set-cache-size error: $@") if $@;
 
@@ -749,7 +750,7 @@ sub phase2 {
        $self->log('info', "spice client_migrate_info");
 
        eval {
-           PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", 
protocol => 'spice',
+           mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
                                                hostname => $proxyticket, 
'port' => 0, 'tls-port' => $spice_port,
                                                'cert-subject' => $subject);
        };
@@ -759,7 +760,7 @@ sub phase2 {
 
     $self->log('info', "start migrate command to $ruri");
     eval {
-        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri);
+       mon_cmd($vmid, "migrate", uri => $ruri);
     };
     my $merr = $@;
     $self->log('info', "migrate uri => $ruri failed: $merr") if $merr;
@@ -777,7 +778,7 @@ sub phase2 {
        usleep($usleep);
        my $stat;
        eval {
-           $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate");
+           $stat = mon_cmd($vmid, "query-migrate");
        };
        if (my $err = $@) {
            $err_count++;
@@ -846,7 +847,7 @@ sub phase2 {
                    $migrate_downtime *= 2;
                    $self->log('info', "migrate_set_downtime: 
$migrate_downtime");
                    eval {
-                       PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 
"migrate_set_downtime", value => int($migrate_downtime*100)/100);
+                       mon_cmd($vmid, "migrate_set_downtime", value => 
int($migrate_downtime*100)/100);
                    };
                    $self->log('info', "migrate_set_downtime error: $@") if $@;
                }
@@ -873,7 +874,7 @@ sub phase2_cleanup {
 
     $self->log('info', "migrate_cancel");
     eval {
-       PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel");
+       mon_cmd($vmid, "migrate_cancel");
     };
     $self->log('info', "migrate_cancel error: $@") if $@;
 
@@ -1024,7 +1025,7 @@ sub phase3_cleanup {
        if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && 
$self->{running}) {
            $self->log('info', "Waiting for spice server migration");
            while (1) {
-               my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 
'query-spice');
+               my $res = mon_cmd($vmid, 'query-spice');
                last if int($res->{'migrated'}) == 1;
                last if $timer > 50;
                $timer ++;
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index cc9288f..5b5de10 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -32,7 +32,6 @@ use PVE::INotify;
 use PVE::JSONSchema qw(get_standard_option);
 use PVE::ProcFSTools;
 use PVE::RPCEnvironment;
-use PVE::SafeSyslog;
 use PVE::Storage;
 use PVE::SysFSTools;
 use PVE::Systemd;
@@ -43,6 +42,7 @@ use PVE::QemuConfig;
 use PVE::QemuServer::Helpers;
 use PVE::QemuServer::Cloudinit;
 use PVE::QemuServer::Memory;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuServer::PCI qw(print_pci_addr print_pcie_addr 
print_pcie_root_port);
 use PVE::QemuServer::USB qw(parse_usb_device);
 
@@ -4062,7 +4062,7 @@ sub config_to_command {
 sub spice_port {
     my ($vmid) = @_;
 
-    my $res = vm_mon_cmd($vmid, 'query-spice');
+    my $res = mon_cmd($vmid, 'query-spice');
 
     return $res->{'tls-port'} || $res->{'port'} || die "no spice port\n";
 }
@@ -4070,7 +4070,7 @@ sub spice_port {
 sub vm_devices_list {
     my ($vmid) = @_;
 
-    my $res = vm_mon_cmd($vmid, 'query-pci');
+    my $res = mon_cmd($vmid, 'query-pci');
     my $devices_to_check = [];
     my $devices = {};
     foreach my $pcibus (@$res) {
@@ -4089,14 +4089,14 @@ sub vm_devices_list {
        $devices_to_check = $to_check;
     }
 
-    my $resblock = vm_mon_cmd($vmid, 'query-block');
+    my $resblock = mon_cmd($vmid, 'query-block');
     foreach my $block (@$resblock) {
        if($block->{device} =~ m/^drive-(\S+)/){
                $devices->{$1} = 1;
        }
     }
 
-    my $resmice = vm_mon_cmd($vmid, 'query-mice');
+    my $resmice = mon_cmd($vmid, 'query-mice');
     foreach my $mice (@$resmice) {
        if ($mice->{name} eq 'QEMU HID Tablet') {
            $devices->{tablet} = 1;
@@ -4107,7 +4107,7 @@ sub vm_devices_list {
     # for usb devices there is no query-usb
     # but we can iterate over the entries in
     # qom-list path=/machine/peripheral
-    my $resperipheral = vm_mon_cmd($vmid, 'qom-list', path => 
'/machine/peripheral');
+    my $resperipheral = mon_cmd($vmid, 'qom-list', path => 
'/machine/peripheral');
     foreach my $per (@$resperipheral) {
        if ($per->{name} =~ m/^usb\d+$/) {
            $devices->{$per->{name}} = 1;
@@ -4288,13 +4288,13 @@ sub qemu_deviceadd {
     $devicefull = "driver=".$devicefull;
     my %options =  split(/[=,]/, $devicefull);
 
-    vm_mon_cmd($vmid, "device_add" , %options);
+    mon_cmd($vmid, "device_add" , %options);
 }
 
 sub qemu_devicedel {
     my ($vmid, $deviceid) = @_;
 
-    my $ret = vm_mon_cmd($vmid, "device_del", id => $deviceid);
+    my $ret = mon_cmd($vmid, "device_del", id => $deviceid);
 }
 
 sub qemu_iothread_add {
@@ -4323,7 +4323,7 @@ sub qemu_iothread_del {
 sub qemu_objectadd {
     my($vmid, $objectid, $qomtype) = @_;
 
-    vm_mon_cmd($vmid, "object-add", id => $objectid, "qom-type" => $qomtype);
+    mon_cmd($vmid, "object-add", id => $objectid, "qom-type" => $qomtype);
 
     return 1;
 }
@@ -4331,7 +4331,7 @@ sub qemu_objectadd {
 sub qemu_objectdel {
     my($vmid, $objectid) = @_;
 
-    vm_mon_cmd($vmid, "object-del", id => $objectid);
+    mon_cmd($vmid, "object-del", id => $objectid);
 
     return 1;
 }
@@ -4341,7 +4341,7 @@ sub qemu_driveadd {
 
     my $drive = print_drive_full($storecfg, $vmid, $device);
     $drive =~ s/\\/\\\\/g;
-    my $ret = vm_human_monitor_command($vmid, "drive_add auto \"$drive\"");
+    my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_add auto 
\"$drive\"");
 
     # If the command succeeds qemu prints: "OK"
     return 1 if $ret =~ m/OK/s;
@@ -4352,7 +4352,7 @@ sub qemu_driveadd {
 sub qemu_drivedel {
     my($vmid, $deviceid) = @_;
 
-    my $ret = vm_human_monitor_command($vmid, "drive_del drive-$deviceid");
+    my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_del 
drive-$deviceid");
     $ret =~ s/^\s+//;
 
     return 1 if $ret eq "";
@@ -4462,7 +4462,7 @@ sub qemu_add_pci_bridge {
 sub qemu_set_link_status {
     my ($vmid, $device, $up) = @_;
 
-    vm_mon_cmd($vmid, "set_link", name => $device,
+    mon_cmd($vmid, "set_link", name => $device,
               up => $up ? JSON::true : JSON::false);
 }
 
@@ -4472,14 +4472,14 @@ sub qemu_netdevadd {
     my $netdev = print_netdev_full($vmid, $conf, $arch, $device, $deviceid, 1);
     my %options =  split(/[=,]/, $netdev);
 
-    vm_mon_cmd($vmid, "netdev_add",  %options);
+    mon_cmd($vmid, "netdev_add",  %options);
     return 1;
 }
 
 sub qemu_netdevdel {
     my ($vmid, $deviceid) = @_;
 
-    vm_mon_cmd($vmid, "netdev_del", id => $deviceid);
+    mon_cmd($vmid, "netdev_del", id => $deviceid);
 }
 
 sub qemu_usb_hotplug {
@@ -4534,7 +4534,7 @@ sub qemu_cpu_hotplug {
                my $retry = 0;
                my $currentrunningvcpus = undef;
                while (1) {
-                   $currentrunningvcpus = vm_mon_cmd($vmid, "query-cpus");
+                   $currentrunningvcpus = mon_cmd($vmid, "query-cpus");
                    last if scalar(@{$currentrunningvcpus}) == $i-1;
                    raise_param_exc({ vcpus => "error unplugging cpu$i" }) if 
$retry > 5;
                    $retry++;
@@ -4551,7 +4551,7 @@ sub qemu_cpu_hotplug {
        return;
     }
 
-    my $currentrunningvcpus = vm_mon_cmd($vmid, "query-cpus");
+    my $currentrunningvcpus = mon_cmd($vmid, "query-cpus");
     die "vcpus in running vm does not match its configuration\n"
        if scalar(@{$currentrunningvcpus}) != $currentvcpus;
 
@@ -4564,7 +4564,7 @@ sub qemu_cpu_hotplug {
            my $retry = 0;
            my $currentrunningvcpus = undef;
            while (1) {
-               $currentrunningvcpus = vm_mon_cmd($vmid, "query-cpus");
+               $currentrunningvcpus = mon_cmd($vmid, "query-cpus");
                last if scalar(@{$currentrunningvcpus}) == $i;
                raise_param_exc({ vcpus => "error hotplugging cpu$i" }) if 
$retry > 10;
                sleep 1;
@@ -4577,7 +4577,7 @@ sub qemu_cpu_hotplug {
     } else {
 
        for (my $i = $currentvcpus; $i < $vcpus; $i++) {
-           vm_mon_cmd($vmid, "cpu-add", id => int($i));
+           mon_cmd($vmid, "cpu-add", id => int($i));
        }
     }
 }
@@ -4591,7 +4591,7 @@ sub qemu_block_set_io_throttle {
 
     return if !check_running($vmid) ;
 
-    vm_mon_cmd($vmid, "block_set_io_throttle", device => $deviceid,
+    mon_cmd($vmid, "block_set_io_throttle", device => $deviceid,
        bps => int($bps),
        bps_rd => int($bps_rd),
        bps_wr => int($bps_wr),
@@ -4656,7 +4656,7 @@ sub qemu_block_resize {
 
     return if !$running;
 
-    vm_mon_cmd($vmid, "block_resize", device => $deviceid, size => int($size));
+    mon_cmd($vmid, "block_resize", device => $deviceid, size => int($size));
 
 }
 
@@ -4666,7 +4666,7 @@ sub qemu_volume_snapshot {
     my $running = check_running($vmid);
 
     if ($running && do_snapshots_with_qemu($storecfg, $volid)){
-       vm_mon_cmd($vmid, 'blockdev-snapshot-internal-sync', device => 
$deviceid, name => $snap);
+       mon_cmd($vmid, 'blockdev-snapshot-internal-sync', device => $deviceid, 
name => $snap);
     } else {
        PVE::Storage::volume_snapshot($storecfg, $volid, $snap);
     }
@@ -4688,7 +4688,7 @@ sub qemu_volume_snapshot_delete {
     }
 
     if ($running && do_snapshots_with_qemu($storecfg, $volid)){
-       vm_mon_cmd($vmid, 'blockdev-snapshot-delete-internal-sync', device => 
$deviceid, name => $snap);
+       mon_cmd($vmid, 'blockdev-snapshot-delete-internal-sync', device => 
$deviceid, name => $snap);
     } else {
        PVE::Storage::volume_snapshot_delete($storecfg, $volid, $snap, 
$running);
     }
@@ -4707,7 +4707,7 @@ sub set_migration_caps {
        "compress" => 0
     };
 
-    my $supported_capabilities = vm_mon_cmd_nocheck($vmid, 
"query-migrate-capabilities");
+    my $supported_capabilities = mon_cmd($vmid, "query-migrate-capabilities");
 
     for my $supported_capability (@$supported_capabilities) {
        push @$cap_ref, {
@@ -4716,7 +4716,7 @@ sub set_migration_caps {
        };
     }
 
-    vm_mon_cmd_nocheck($vmid, "migrate-set-capabilities", capabilities => 
$cap_ref);
+    mon_cmd($vmid, "migrate-set-capabilities", capabilities => $cap_ref);
 }
 
 my $fast_plug_option = {
@@ -4797,7 +4797,7 @@ sub vmconfig_hotplug_pending {
                die "skip\n" if defined($conf->{balloon}) && $conf->{balloon} 
== 0;
                # here we reset the ballooning value to memory
                my $balloon = $conf->{memory} || $defaults->{memory};
-               vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
+               mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
            } elsif ($fast_plug_option->{$opt}) {
                # do nothing
            } elsif ($opt =~ m/^net(\d+)$/) {
@@ -4881,7 +4881,7 @@ sub vmconfig_hotplug_pending {
                # allow manual ballooning if shares is set to zero
                if ((defined($conf->{shares}) && ($conf->{shares} == 0))) {
                    my $balloon = $conf->{pending}->{balloon} || 
$conf->{memory} || $defaults->{memory};
-                   vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
+                   mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
                }
            } elsif ($opt =~ m/^net(\d+)$/) {
                # some changes can be done without hotplug
@@ -5157,14 +5157,14 @@ sub vmconfig_update_disk {
            } else { # cdrom
 
                if ($drive->{file} eq 'none') {
-                   vm_mon_cmd($vmid, "eject",force => JSON::true,device => 
"drive-$opt");
+                   mon_cmd($vmid, "eject",force => JSON::true,device => 
"drive-$opt");
                    if (drive_is_cloudinit($old_drive)) {
                        vmconfig_register_unused_drive($storecfg, $vmid, $conf, 
$old_drive);
                    }
                } else {
                    my $path = get_iso_path($storecfg, $vmid, $drive->{file});
-                   vm_mon_cmd($vmid, "eject", force => JSON::true,device => 
"drive-$opt"); # force eject if locked
-                   vm_mon_cmd($vmid, "change", device => "drive-$opt",target 
=> "$path") if $path;
+                   mon_cmd($vmid, "eject", force => JSON::true,device => 
"drive-$opt"); # force eject if locked
+                   mon_cmd($vmid, "change", device => "drive-$opt",target => 
"$path") if $path;
                }
 
                return 1;
@@ -5413,7 +5413,7 @@ sub vm_start {
        print "migration listens on $migrate_uri\n" if $migrate_uri;
 
        if ($statefile && $statefile ne 'tcp' && $statefile ne 'unix')  {
-           eval { vm_mon_cmd_nocheck($vmid, "cont"); };
+           eval { mon_cmd($vmid, "cont"); };
            warn $@ if $@;
        }
 
@@ -5425,13 +5425,13 @@ sub vm_start {
            my $pfamily = PVE::Tools::get_host_address_family($nodename);
            my $storage_migrate_port = PVE::Tools::next_migrate_port($pfamily);
 
-           vm_mon_cmd_nocheck($vmid, "nbd-server-start", addr => { type => 
'inet', data => { host => "${localip}", port => "${storage_migrate_port}" } } );
+           mon_cmd($vmid, "nbd-server-start", addr => { type => 'inet', data 
=> { host => "${localip}", port => "${storage_migrate_port}" } } );
 
            $localip = "[$localip]" if Net::IP::ip_is_ipv6($localip);
 
            foreach my $opt (sort keys %$local_volumes) {
                my $volid = $local_volumes->{$opt};
-               vm_mon_cmd_nocheck($vmid, "nbd-server-add", device => 
"drive-$opt", writable => JSON::true );
+               mon_cmd($vmid, "nbd-server-add", device => "drive-$opt", 
writable => JSON::true );
                my $migrate_storage_uri = 
"nbd:${localip}:${storage_migrate_port}:exportname=drive-$opt";
                print "storage migration listens on $migrate_storage_uri 
volume:$volid\n";
            }
@@ -5446,13 +5446,13 @@ sub vm_start {
            if ($spice_port) {
                print "spice listens on port $spice_port\n";
                if ($spice_ticket) {
-                   vm_mon_cmd_nocheck($vmid, "set_password", protocol => 
'spice', password => $spice_ticket);
-                   vm_mon_cmd_nocheck($vmid, "expire_password", protocol => 
'spice', time => "+30");
+                   mon_cmd($vmid, "set_password", protocol => 'spice', 
password => $spice_ticket);
+                   mon_cmd($vmid, "expire_password", protocol => 'spice', time 
=> "+30");
                }
            }
 
        } else {
-           vm_mon_cmd_nocheck($vmid, "balloon", value => 
$conf->{balloon}*1024*1024)
+           mon_cmd($vmid, "balloon", value => $conf->{balloon}*1024*1024)
                if !$statefile && $conf->{balloon};
 
            foreach my $opt (keys %$conf) {
@@ -5462,7 +5462,7 @@ sub vm_start {
            }
        }
 
-       vm_mon_cmd_nocheck($vmid, 'qom-set',
+       mon_cmd($vmid, 'qom-set',
                    path => "machine/peripheral/balloon0",
                    property => "guest-stats-polling-interval",
                    value => 2) if (!defined($conf->{balloon}) || 
$conf->{balloon});
@@ -5479,60 +5479,6 @@ sub vm_start {
     });
 }
 
-sub vm_mon_cmd {
-    my ($vmid, $execute, %params) = @_;
-
-    my $cmd = { execute => $execute, arguments => \%params };
-    vm_qmp_command($vmid, $cmd);
-}
-
-sub vm_mon_cmd_nocheck {
-    my ($vmid, $execute, %params) = @_;
-
-    my $cmd = { execute => $execute, arguments => \%params };
-    vm_qmp_command($vmid, $cmd, 1);
-}
-
-sub vm_qmp_command {
-    my ($vmid, $cmd, $nocheck) = @_;
-
-    my $res;
-
-    my $timeout;
-    if ($cmd->{arguments}) {
-       $timeout = delete $cmd->{arguments}->{timeout};
-    }
-
-    eval {
-       die "VM $vmid not running\n" if !check_running($vmid, $nocheck);
-       my $sname = PVE::QemuServer::Helpers::qmp_socket($vmid);
-       if (-e $sname) { # test if VM is reasonambe new and supports qmp/qga
-           my $qmpclient = PVE::QMPClient->new();
-
-           $res = $qmpclient->cmd($vmid, $cmd, $timeout);
-       } else {
-           die "unable to open monitor socket\n";
-       }
-    };
-    if (my $err = $@) {
-       syslog("err", "VM $vmid qmp command failed - $err");
-       die $err;
-    }
-
-    return $res;
-}
-
-sub vm_human_monitor_command {
-    my ($vmid, $cmdline) = @_;
-
-    my $cmd = {
-       execute => 'human-monitor-command',
-       arguments => { 'command-line' => $cmdline},
-    };
-
-    return vm_qmp_command($vmid, $cmd);
-}
-
 sub vm_commandline {
     my ($storecfg, $vmid, $snapname) = @_;
 
@@ -5563,7 +5509,7 @@ sub vm_reset {
 
        PVE::QemuConfig->check_lock($conf) if !$skiplock;
 
-       vm_mon_cmd($vmid, "system_reset");
+       mon_cmd($vmid, "system_reset");
     });
 }
 
@@ -5646,15 +5592,12 @@ sub _do_vm_stop {
     eval {
        if ($shutdown) {
            if (defined($conf) && parse_guest_agent($conf)->{enabled}) {
-               vm_qmp_command($vmid, {
-                       execute => "guest-shutdown",
-                       arguments => { timeout => $timeout }
-                   }, $nocheck);
+               mon_cmd($vmid, "guest-shutdown", timeout => $timeout);
            } else {
-               vm_qmp_command($vmid, { execute => "system_powerdown" }, 
$nocheck);
+               mon_cmd($vmid, "system_powerdown");
            }
        } else {
-           vm_qmp_command($vmid, { execute => "quit" }, $nocheck);
+           mon_cmd($vmid, "quit");
        }
     };
     my $err = $@;
@@ -5770,7 +5713,7 @@ sub vm_suspend {
            $path = PVE::Storage::path($storecfg, $vmstate);
            PVE::QemuConfig->write_config($vmid, $conf);
        } else {
-           vm_mon_cmd($vmid, "stop");
+           mon_cmd($vmid, "stop");
        }
     });
 
@@ -5779,9 +5722,9 @@ sub vm_suspend {
        PVE::Storage::activate_volumes($storecfg, [$vmstate]);
 
        eval {
-           vm_mon_cmd($vmid, "savevm-start", statefile => $path);
+           mon_cmd($vmid, "savevm-start", statefile => $path);
            for(;;) {
-               my $state = vm_mon_cmd_nocheck($vmid, "query-savevm");
+               my $state = mon_cmd($vmid, "query-savevm");
                if (!$state->{status}) {
                    die "savevm not active\n";
                } elsif ($state->{status} eq 'active') {
@@ -5804,7 +5747,7 @@ sub vm_suspend {
            if ($err) {
                # cleanup, but leave suspending lock, to indicate something 
went wrong
                eval {
-                   vm_mon_cmd($vmid, "savevm-end");
+                   mon_cmd($vmid, "savevm-end");
                    PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
                    PVE::Storage::vdisk_free($storecfg, $vmstate);
                    delete $conf->@{qw(vmstate runningmachine)};
@@ -5817,7 +5760,7 @@ sub vm_suspend {
            die "lock changed unexpectedly\n"
                if !PVE::QemuConfig->has_lock($conf, 'suspending');
 
-           vm_qmp_command($vmid, { execute => "quit" });
+           mon_cmd($vmid, "quit");
            $conf->{lock} = 'suspended';
            PVE::QemuConfig->write_config($vmid, $conf);
        });
@@ -5828,8 +5771,7 @@ sub vm_resume {
     my ($vmid, $skiplock, $nocheck) = @_;
 
     PVE::QemuConfig->lock_config($vmid, sub {
-       my $vm_mon_cmd = $nocheck ? \&vm_mon_cmd_nocheck : \&vm_mon_cmd;
-       my $res = $vm_mon_cmd->($vmid, 'query-status');
+       my $res = mon_cmd($vmid, 'query-status');
        my $resume_cmd = 'cont';
 
        if ($res->{status} && $res->{status} eq 'suspended') {
@@ -5844,7 +5786,7 @@ sub vm_resume {
                if !($skiplock || PVE::QemuConfig->has_lock($conf, 'backup'));
        }
 
-       $vm_mon_cmd->($vmid, $resume_cmd);
+       mon_cmd($vmid, $resume_cmd);
     });
 }
 
@@ -5856,7 +5798,7 @@ sub vm_sendkey {
        my $conf = PVE::QemuConfig->load_config($vmid);
 
        # there is no qmp command, so we use the human monitor command
-       my $res = vm_human_monitor_command($vmid, "sendkey $key");
+       my $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, "sendkey $key");
        die $res if $res ne '';
     });
 }
@@ -6670,7 +6612,7 @@ sub do_snapshots_with_qemu {
 sub qga_check_running {
     my ($vmid, $nowarn) = @_;
 
-    eval { vm_mon_cmd($vmid, "guest-ping", timeout => 3); };
+    eval { mon_cmd($vmid, "guest-ping", timeout => 3); };
     if ($@) {
        warn "Qemu Guest Agent is not running - $@" if !$nowarn;
        return 0;
@@ -6842,7 +6784,7 @@ sub qemu_drive_mirror {
     }
 
     # if a job already runs for this device we get an error, catch it for cleanup
-    eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); };
+    eval { mon_cmd($vmid, "drive-mirror", %$opts); };
     if (my $err = $@) {
        eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
        warn "$@\n" if $@;
@@ -6861,7 +6803,7 @@ sub qemu_drive_mirror_monitor {
        while (1) {
            die "storage migration timed out\n" if $err_complete > 300;
 
-           my $stats = vm_mon_cmd($vmid, "query-block-jobs");
+           my $stats = mon_cmd($vmid, "query-block-jobs");
 
            my $running_mirror_jobs = {};
            foreach my $stat (@$stats) {
@@ -6904,7 +6846,7 @@ sub qemu_drive_mirror_monitor {
                    my $agent_running = $qga && qga_check_running($vmid);
                    if ($agent_running) {
                        print "freeze filesystem\n";
-                       eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); };
+                       eval { mon_cmd($vmid, "guest-fsfreeze-freeze"); };
                    } else {
                        print "suspend vm\n";
                        eval { PVE::QemuServer::vm_suspend($vmid, 1); };
@@ -6915,7 +6857,7 @@ sub qemu_drive_mirror_monitor {
 
                    if ($agent_running) {
                        print "unfreeze filesystem\n";
-                       eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); };
+                       eval { mon_cmd($vmid, "guest-fsfreeze-thaw"); };
                    } else {
                        print "resume vm\n";
                        eval {  PVE::QemuServer::vm_resume($vmid, 1, 1); };
@@ -6928,7 +6870,7 @@ sub qemu_drive_mirror_monitor {
                        # try to switch the disk if source and destination are on the same guest
                        print "$job: Completing block job...\n";
 
-                       eval { vm_mon_cmd($vmid, "block-job-complete", device => $job) };
+                       eval { mon_cmd($vmid, "block-job-complete", device => $job) };
                        if ($@ =~ m/cannot be completed/) {
                            print "$job: Block job cannot be completed, try again.\n";
                            $err_complete++;
@@ -6956,12 +6898,12 @@ sub qemu_blockjobs_cancel {
 
     foreach my $job (keys %$jobs) {
        print "$job: Cancelling block job\n";
-       eval { vm_mon_cmd($vmid, "block-job-cancel", device => $job); };
+       eval { mon_cmd($vmid, "block-job-cancel", device => $job); };
        $jobs->{$job}->{cancel} = 1;
     }
 
     while (1) {
-       my $stats = vm_mon_cmd($vmid, "query-block-jobs");
+       my $stats = mon_cmd($vmid, "query-block-jobs");
 
        my $running_jobs = {};
        foreach my $stat (@$stats) {
@@ -7037,8 +6979,7 @@ sub clone_disk {
 sub get_current_qemu_machine {
     my ($vmid) = @_;
 
-    my $cmd = { execute => 'query-machines', arguments => {} };
-    my $res = vm_qmp_command($vmid, $cmd);
+    my $res = mon_cmd($vmid, "query-machines");
 
     my ($current, $default);
     foreach my $e (@$res) {
@@ -7052,8 +6993,7 @@ sub get_current_qemu_machine {
 
 sub get_running_qemu_version {
     my ($vmid) = @_;
-    my $cmd = { execute => 'query-version', arguments => {} };
-    my $res = vm_qmp_command($vmid, $cmd);
+    my $res = mon_cmd($vmid, "query-version");
     return "$res->{qemu}->{major}.$res->{qemu}->{minor}";
 }
 
@@ -7104,7 +7044,7 @@ sub version_cmp {
 sub runs_at_least_qemu_version {
     my ($vmid, $major, $minor, $extra) = @_;
 
-    my $v = vm_qmp_command($vmid, { execute => 'query-version' });
+    my $v = mon_cmd($vmid, "query-version");
     die "could not query currently running version for VM $vmid\n" if !defined($v);
     $v = $v->{qemu};
 
@@ -7164,7 +7104,7 @@ sub create_efidisk($$$$$) {
 sub vm_iothreads_list {
     my ($vmid) = @_;
 
-    my $res = vm_mon_cmd($vmid, 'query-iothreads');
+    my $res = mon_cmd($vmid, 'query-iothreads');
 
     my $iothreads = {};
     foreach my $iothread (@$res) {
@@ -7297,7 +7237,7 @@ sub generate_smbios1_uuid {
 sub nbd_stop {
     my ($vmid) = @_;
 
-    vm_mon_cmd($vmid, 'nbd-server-stop');
+    mon_cmd($vmid, 'nbd-server-stop');
 }
 
 sub create_reboot_request {
diff --git a/PVE/QemuServer/Agent.pm b/PVE/QemuServer/Agent.pm
index 586ac3a..9fec4fb 100644
--- a/PVE/QemuServer/Agent.pm
+++ b/PVE/QemuServer/Agent.pm
@@ -4,6 +4,7 @@ use strict;
 use warnings;
 
 use PVE::QemuServer;
+use PVE::QemuServer::Monitor;
 use MIME::Base64 qw(decode_base64);
 use JSON;
 use base 'Exporter';
@@ -59,7 +60,7 @@ sub agent_cmd {
     my $conf = PVE::QemuConfig->load_config($vmid); # also checks if VM exists
     agent_available($vmid, $conf, $noerr);
 
-    my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-$cmd", %$params);
+    my $res = PVE::QemuServer::Monitor::mon_cmd($vmid, "guest-$cmd", %$params);
     check_agent_error($res, $errormsg, $noerr);
 
     return $res;
diff --git a/PVE/QemuServer/Makefile b/PVE/QemuServer/Makefile
index 56d1493..02b0209 100644
--- a/PVE/QemuServer/Makefile
+++ b/PVE/QemuServer/Makefile
@@ -6,6 +6,7 @@ SOURCES=PCI.pm          \
        Cloudinit.pm    \
        Agent.pm        \
        Helpers.pm      \
+       Monitor.pm      \
 
 .PHONY: install
 install: ${SOURCES}
diff --git a/PVE/QemuServer/Memory.pm b/PVE/QemuServer/Memory.pm
index f52f1d5..d500b3b 100644
--- a/PVE/QemuServer/Memory.pm
+++ b/PVE/QemuServer/Memory.pm
 @@ -7,6 +7,7 @@ use PVE::Tools qw(run_command lock_file lock_file_full file_read_firstline dir_g
 use PVE::Exception qw(raise raise_param_exc);
 
 use PVE::QemuServer;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 
 my $MAX_NUMA = 8;
 my $MAX_MEM = 4194304;
@@ -141,7 +142,7 @@ sub qemu_memory_hotplug {
                        my $hugepages_host_topology = hugepages_host_topology();
                        hugepages_allocate($hugepages_topology, 
$hugepages_host_topology);
 
-                       eval { PVE::QemuServer::vm_mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-file", id => "mem-$name", props => {
+                       eval { mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-file", id => "mem-$name", props => {
                                             size => int($dimm_size*1024*1024), 'mem-path' => $path, share => JSON::true, prealloc => JSON::true } ); };
                        if (my $err = $@) {
                            hugepages_reset($hugepages_host_topology);
@@ -153,7 +154,7 @@ sub qemu_memory_hotplug {
                    eval { hugepages_update_locked($code); };
 
                } else {
-                   eval { PVE::QemuServer::vm_mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-ram", id => "mem-$name", props => { size => int($dimm_size*1024*1024) } ) };
+                   eval { mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-ram", id => "mem-$name", props => { size => int($dimm_size*1024*1024) } ) };
                }
 
                if (my $err = $@) {
@@ -161,7 +162,7 @@ sub qemu_memory_hotplug {
                    die $err;
                }
 
-               eval { PVE::QemuServer::vm_mon_cmd($vmid, "device_add", driver => "pc-dimm", id => "$name", memdev => "mem-$name", node => $numanode) };
+               eval { mon_cmd($vmid, "device_add", driver => "pc-dimm", id => "$name", memdev => "mem-$name", node => $numanode) };
                if (my $err = $@) {
                    eval { PVE::QemuServer::qemu_objectdel($vmid, "mem-$name"); };
                    die $err;
@@ -201,7 +202,7 @@ sub qemu_memory_hotplug {
 sub qemu_dimm_list {
     my ($vmid) = @_;
 
-    my $dimmarray = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-memory-devices");
+    my $dimmarray = mon_cmd($vmid, "query-memory-devices");
     my $dimms = {};
 
     foreach my $dimm (@$dimmarray) {
diff --git a/PVE/VZDump/QemuServer.pm b/PVE/VZDump/QemuServer.pm
index e02a069..4322cb8 100644
--- a/PVE/VZDump/QemuServer.pm
+++ b/PVE/VZDump/QemuServer.pm
@@ -18,6 +18,7 @@ use PVE::Tools;
 use PVE::VZDump;
 
 use PVE::QemuServer;
+use PVE::QemuServer::Monitor qw(mon_cmd);
 
 use base qw (PVE::VZDump::Plugin);
 
@@ -417,7 +418,7 @@ sub archive {
        }
 
        if ($agent_running){
-           eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); };
+           eval { mon_cmd($vmid, "guest-fsfreeze-freeze"); };
            if (my $err = $@) {
                $self->logerr($err);
            }
@@ -427,7 +428,7 @@ sub archive {
        my $qmperr = $@;
 
        if ($agent_running){
-           eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); };
+           eval { mon_cmd($vmid, "guest-fsfreeze-thaw"); };
            if (my $err = $@) {
                $self->logerr($err);
            }
@@ -452,7 +453,7 @@ sub archive {
            } else {
                $self->loginfo("resuming VM again");
            }
-           PVE::QemuServer::vm_mon_cmd($vmid, 'cont');
+           mon_cmd($vmid, 'cont');
        }
 
        my $status;
@@ -465,7 +466,7 @@ sub archive {
        my $transferred;
 
        while(1) {
-           $status = PVE::QemuServer::vm_mon_cmd($vmid, 'query-backup');
+           $status = mon_cmd($vmid, 'query-backup');
            my $total = $status->{total} || 0;
            $transferred = $status->{transferred} || 0;
            my $per = $total ? int(($transferred * 100)/$total) : 0;
@@ -524,7 +525,7 @@ sub archive {
     if ($err) {
        $self->logerr($err);
        $self->loginfo("aborting backup job");
-       eval { PVE::QemuServer::vm_mon_cmd($vmid, 'backup-cancel'); };
+       eval { mon_cmd($vmid, 'backup-cancel'); };
        if (my $err1 = $@) {
            $self->logerr($err1);
        }
@@ -533,7 +534,7 @@ sub archive {
     if ($stop_after_backup) {
        # stop if not running
        eval {
-           my $resp = PVE::QemuServer::vm_mon_cmd($vmid, 'query-status');
+           my $resp = mon_cmd($vmid, 'query-status');
             my $status = $resp && $resp->{status} ?  $resp->{status} : 'unknown';
            if ($status eq 'prelaunch') {
                $self->loginfo("stopping kvm after backup task");
diff --git a/test/snapshot-test.pm b/test/snapshot-test.pm
index a76b4fd..685934c 100644
--- a/test/snapshot-test.pm
+++ b/test/snapshot-test.pm
@@ -312,13 +312,9 @@ sub vm_running_locally {
 
 # END mocked PVE::QemuServer::Helpers methods
 
-# BEGIN redefine PVE::QemuServer methods
+# BEGIN mocked PVE::QemuServer::Monitor methods
 
-sub do_snapshots_with_qemu {
-    return 0;
-}
-
-sub vm_qmp_command {
+sub qmp_cmd {
     my ($vmid, $cmd, $nocheck) = @_;
 
     my $exec = $cmd->{execute};
@@ -351,6 +347,14 @@ sub vm_qmp_command {
     die "unexpected vm_qmp_command!\n";
 }
 
+# END mocked PVE::QemuServer::Monitor methods
+
+# BEGIN redefine PVE::QemuServer methods
+
+sub do_snapshots_with_qemu {
+    return 0;
+}
+
 sub vm_start {
     my ($storecfg, $vmid, $statefile, $skiplock, $migratedfrom, $paused, $forcemachine) = @_;
 
 @@ -380,6 +384,9 @@ PVE::Tools::run_command("cp -a snapshot-input snapshot-working");
 my $qemu_helpers_module = new Test::MockModule('PVE::QemuServer::Helpers');
 $qemu_helpers_module->mock('vm_running_locally', \&vm_running_locally);
 
+my $qemu_monitor_module = new Test::MockModule('PVE::QemuServer::Monitor');
+$qemu_monitor_module->mock('qmp_cmd', \&qmp_cmd);
+
 my $qemu_config_module = new Test::MockModule('PVE::QemuConfig');
 $qemu_config_module->mock('config_file_lock', \&config_file_lock);
 $qemu_config_module->mock('cfs_config_path', \&cfs_config_path);
-- 
2.20.1


_______________________________________________
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel

Reply via email to