applied, with a minor fixup on top: removed the now unnecessary part of
the volid regex, keeping only /^nbd:/
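
a minimal sketch of the matched branch after that fixup (derived from
the diff below; the actual committed code may differ slightly):

    if ($dst_volid =~ /^nbd:/) {
        $qemu_target = $dst_volid;
        $format = "nbd";
    }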

On 2/15/18 1:43 PM, Alexandre Derumier wrote:
> The socat tunnel for nbd mirror was introduced here
> https://pve.proxmox.com/pipermail/pve-devel/2017-January/024777.html
> as a workaround for the nbd client hanging on a non-responding nbd server.
> 
> We added a 30s timeout to the socat tunnel, but when migrating multiple
> disks this can break the migration, for example when the first disk has
> already finished and no longer sends any data through the tunnel.
> 
> The connect timeout bug has been fixed in qemu 2.9,
> so we can remove the socat tunnel now.
> ---
>  PVE/QemuServer.pm | 46 +---------------------------------------------
>  1 file changed, 1 insertion(+), 45 deletions(-)
> 
> diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
> index 20d6682..b134be0 100644
> --- a/PVE/QemuServer.pm
> +++ b/PVE/QemuServer.pm
> @@ -6052,31 +6052,8 @@ sub qemu_drive_mirror {
>      $jobs->{"drive-$drive"} = {};
>  
>      if ($dst_volid =~ /^nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+)/) {
> -     my $server = $1;
> -     my $port = $2;
> -     my $exportname = $3;
> -
> +     $qemu_target = $dst_volid;
>       $format = "nbd";
> -     my $unixsocket = "/run/qemu-server/$vmid.mirror-drive-$drive";
> -     $qemu_target = "nbd+unix:///$exportname?socket=$unixsocket";
> -     my $cmd = ['socat', '-T30', "UNIX-LISTEN:$unixsocket,fork", "TCP:$server:$2,connect-timeout=5"];
> -
> -     my $pid = fork();
> -     if (!defined($pid)) {
> -         die "forking socat tunnel failed\n";
> -     } elsif ($pid == 0) {
> -         exec(@$cmd);
> -         warn "exec failed: $!\n";
> -         POSIX::_exit(-1);
> -     }
> -     $jobs->{"drive-$drive"}->{pid} = $pid;
> -
> -     my $timeout = 0;
> -     while (!-S $unixsocket) {
> -         die "nbd connection helper timed out\n"
> -             if $timeout++ > 5;
> -         sleep 1;
> -     }
>      } else {
>       my $storecfg = PVE::Storage::config();
>       my ($dst_storeid, $dst_volname) = PVE::Storage::parse_volume_id($dst_volid);
> @@ -6188,7 +6165,6 @@ sub qemu_drive_mirror_monitor {
>                       }else {
>                           print "$job: Completed successfully.\n";
>                           $jobs->{$job}->{complete} = 1;
> -                         eval { qemu_blockjobs_finish_tunnel($vmid, $job, $jobs->{$job}->{pid}) } ;
>                       }
>                   }
>               }
> @@ -6226,7 +6202,6 @@ sub qemu_blockjobs_cancel {
>  
>           if (defined($jobs->{$job}->{cancel}) && !defined($running_jobs->{$job})) {
>               print "$job: Done.\n";
> -             eval { qemu_blockjobs_finish_tunnel($vmid, $job, $jobs->{$job}->{pid}) } ;
>               delete $jobs->{$job};
>           }
>       }
> @@ -6237,25 +6212,6 @@ sub qemu_blockjobs_cancel {
>      }
>  }
>  
> -sub qemu_blockjobs_finish_tunnel {
> -   my ($vmid, $job, $cpid) = @_;
> -
> -   return if !$cpid;
> -
> -   for (my $i = 1; $i < 20; $i++) {
> -     my $waitpid = waitpid($cpid, WNOHANG);
> -     last if (defined($waitpid) && ($waitpid == $cpid));
> - 
> -     if ($i == 10) {
> -         kill(15, $cpid);
> -     } elsif ($i >= 15) {
> -         kill(9, $cpid);
> -     }
> -     sleep (1);
> -    }
> -    unlink "/run/qemu-server/$vmid.mirror-$job";
> -}
> -
>  sub clone_disk {
>      my ($storecfg, $vmid, $running, $drivename, $drive, $snapname,
>       $newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga) = @_;
> 
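
For reference, the net effect on the mirror target (everything below is
taken from the diff above, except the example address, which is made up):

    # before: qemu connected to a local socat tunnel, which forwarded the
    # unix socket to the remote TCP nbd server (-T30, connect-timeout=5)
    $qemu_target = "nbd+unix:///$exportname?socket=/run/qemu-server/$vmid.mirror-drive-$drive";

    # after: qemu >= 2.9 handles the connect timeout itself, so the volid
    # is handed to drive-mirror verbatim
    $qemu_target = $dst_volid;   # e.g. "nbd:10.0.0.2:60000:exportname=drive-scsi0"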

