---
 src/PVE/API2/LXC.pm | 212 ++++++++++++++++++++++++++++------------------------
 1 file changed, 113 insertions(+), 99 deletions(-)
diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index 0397224..2d84f0b 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -1191,6 +1191,9 @@ __PACKAGE__->register_method({
 
     my $storage = extract_param($param, 'storage');
 
+    die "Full clone requires a target storage.\n"
+        if $param->{full} && !$storage;
+
     my $localnode = PVE::INotify::nodename();
 
     my $storecfg = PVE::Storage::config();
@@ -1202,142 +1205,153 @@ __PACKAGE__->register_method({
 
     PVE::Cluster::check_cfs_quorum();
 
-    my $running = PVE::LXC::check_running($vmid) || 0;
-
-    my $clonefn = sub {
-
-        # do all tests after lock
-        # we also try to do all tests before we fork the worker
-        my $conf = PVE::LXC::Config->load_config($vmid);
-
-        PVE::LXC::Config->check_lock($conf);
-
-        my $verify_running = PVE::LXC::check_running($vmid) || 0;
-
-        die "unexpected state change\n" if $verify_running != $running;
+    my $conffile;
+    my $newconf = {};
+    my $mountpoints = {};
+    my $fullclone = {};
+    my $vollist = [];
 
-        die "snapshot '$snapname' does not exist\n"
-            if $snapname && !defined( $conf->{snapshots}->{$snapname});
+    PVE::LXC::Config->lock_config($vmid, sub {
+        my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
+        eval {
+            die "snapshot '$snapname' does not exist\n"
+                if $snapname && !defined($src_conf->{snapshots}->{$snapname});
 
-        my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;
+            my $running = PVE::LXC::check_running($vmid) || 0;
 
-        my $conffile = PVE::LXC::Config->config_file($newid);
-        die "unable to create CT $newid: config file already exists\n"
-            if -f $conffile;
+            my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf;
 
-        my $newconf = { lock => 'clone' };
-        my $mountpoints = {};
-        my $fullclone = {};
-        my $vollist = [];
+            $conffile = PVE::LXC::Config->config_file($newid);
+            die "unable to create CT $newid: config file already exists\n"
+                if -f $conffile;
 
-        foreach my $opt (keys %$oldconf) {
-            my $value = $oldconf->{$opt};
+            foreach my $opt (keys %$src_conf) {
+                next if $opt =~ m/^unused\d+$/;
 
-            # no need to copy unused images, because VMID(owner) changes anyways
-            next if $opt =~ m/^unused\d+$/;
+                my $value = $src_conf->{$opt};
 
-            if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
-                my $mp = $opt eq 'rootfs' ?
-                    PVE::LXC::Config->parse_ct_rootfs($value) :
-                    PVE::LXC::Config->parse_ct_mountpoint($value);
+                if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
+                    my $mp = $opt eq 'rootfs' ?
+                        PVE::LXC::Config->parse_ct_rootfs($value) :
+                        PVE::LXC::Config->parse_ct_mountpoint($value);
 
-                if ($mp->{type} eq 'volume') {
-                    my $volid = $mp->{volume};
-                    if ($param->{full}) {
-                        die "fixme: full clone not implemented";
+                    if ($mp->{type} eq 'volume') {
+                        my $volid = $mp->{volume};
+                        if ($param->{full}) {
+                            die "Cannot do full clones on a running container without snapshots\n"
+                                if $running && !defined($snapname);
+                            $fullclone->{$opt} = 1;
+                        } else {
+                            # not full means clone instead of copy
+                            die "Linked clone feature for '$volid' is not available\n"
+                                if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running);
+                        }
 
-                        die "Full clone feature for '$volid' is not available\n"
-                            if !PVE::Storage::volume_has_feature($storecfg, 'copy', $volid, $snapname, $running);
-                        $fullclone->{$opt} = 1;
-                    } else {
-                        # not full means clone instead of copy
-                        die "Linked clone feature for '$volid' is not available\n"
-                            if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running);
-                    }
+                        $mountpoints->{$opt} = $mp;
+                        push @$vollist, $volid;
 
+                    } else {
+                        # TODO: allow bind mounts?
+ die "unable to clone mountpint '$opt' (type $mp->{type})\n"; } - - $mountpoints->{$opt} = $mp; - push @$vollist, $volid; - } else { - # TODO: allow bind mounts? - die "unable to clone mountpint '$opt' (type $mp->{type})\n"; + # copy everything else + $newconf->{$opt} = $value; } + } - } else { - # copy everything else - $newconf->{$opt} = $value; + # Replace the 'disk' lock with a 'create' lock. + $newconf->{lock} = 'create'; + + delete $newconf->{template}; + if ($param->{hostname}) { + $newconf->{hostname} = $param->{hostname}; } - } - delete $newconf->{template}; - if ($param->{hostname}) { - $newconf->{hostname} = $param->{hostname}; - } + if ($param->{description}) { + $newconf->{description} = $param->{description}; + } - if ($param->{description}) { - $newconf->{description} = $param->{description}; + # create empty/temp config - this fails if CT already exists on other node + PVE::LXC::Config->write_config($newid, $newconf); + }; + if (my $err = $@) { + eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; + warn $@ if $@; + die $err; } + }); - # create empty/temp config - this fails if CT already exists on other node - PVE::Tools::file_set_contents($conffile, "# ctclone temporary file\nlock: clone\n"); + my $update_conf = sub { + my ($key, $value) = @_; + return PVE::LXC::Config->lock_config($newid, sub { + my $conf = PVE::LXC::Config->load_config($newid); + die "Lost 'create' config lock, aborting.\n" + if !PVE::LXC::Config->has_lock($conf, 'create'); + $conf->{$key} = $value; + PVE::LXC::Config->write_config($newid, $conf); + }); + }; - my $realcmd = sub { - my $upid = shift; + my $realcmd = sub { + my ($upid) = @_; - my $newvollist = []; + my $newvollist = []; - eval { - local $SIG{INT} = - local $SIG{TERM} = - local $SIG{QUIT} = - local $SIG{HUP} = sub { die "interrupted by signal\n"; }; - - PVE::Storage::activate_volumes($storecfg, $vollist, $snapname); + eval { + local $SIG{INT} = + local $SIG{TERM} = + local $SIG{QUIT} = + local $SIG{HUP} = sub { die "interrupted by signal\n"; }; - foreach my $opt (keys %$mountpoints) { - my $mp = $mountpoints->{$opt}; - my $volid = $mp->{volume}; + PVE::Storage::activate_volumes($storecfg, $vollist, $snapname); - if ($fullclone->{$opt}) { - die "fixme: full clone not implemented\n"; - } else { - print "create linked clone of mount point $opt ($volid)\n"; - my $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname); - push @$newvollist, $newvolid; - $mp->{volume} = $newvolid; + foreach my $opt (keys %$mountpoints) { + my $mp = $mountpoints->{$opt}; + my $volid = $mp->{volume}; - $newconf->{$opt} = PVE::LXC::Config->print_ct_mountpoint($mp, $opt eq 'rootfs'); - PVE::LXC::Config->write_config($newid, $newconf); - } + my $newvolid; + if ($fullclone->{$opt}) { + print "create full clone of mountpoint $opt ($volid)\n"; + $newvolid = PVE::LXC::copy_volume($mp, $newid, $storage, $storecfg, $newconf, $snapname); + } else { + print "create linked clone of mount point $opt ($volid)\n"; + $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname); } - delete $newconf->{lock}; - PVE::LXC::Config->write_config($newid, $newconf); + push @$newvollist, $newvolid; + $mp->{volume} = $newvolid; - PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool; - }; - if (my $err = $@) { - unlink $conffile; - - sleep 1; # some storage like rbd need to wait before release volume - really? 
-
-                foreach my $volid (@$newvollist) {
-                    eval { PVE::Storage::vdisk_free($storecfg, $volid); };
-                    warn $@ if $@;
-                }
-                die "clone failed: $err";
-            }
+                push @$newvollist, $newvolid;
+                $mp->{volume} = $newvolid;
 
-            return;
-        };
+                $update_conf->($opt, PVE::LXC::Config->print_ct_mountpoint($mp, $opt eq 'rootfs'));
+            }
 
-        PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+            PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
+            PVE::LXC::Config->remove_lock($newid, 'create');
+        };
+        my $err = $@;
 
-        return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
-    };
+        # Unlock the source config in any case:
+        eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
+        warn $@ if $@;
 
-    return PVE::LXC::Config->lock_config($vmid, $clonefn);
+        if ($err) {
+            # Now cleanup the config & disks:
+            unlink $conffile;
+            sleep 1; # some storages like rbd need to wait before release volume - really?
+
+            foreach my $volid (@$newvollist) {
+                eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+                warn $@ if $@;
+            }
+            die "clone failed: $err";
+        }
+
+        return;
+    };
 
+    PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+    return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
 }});
-- 
2.11.0
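
A note on the locking protocol the patch establishes: the source CT keeps a
persistent 'disk' lock while the target CT carries a 'create' lock until the
worker finishes. All calls below appear in the diff itself; the skeleton
around them is only a sketch:

    # 1) Under the short-lived flock, persist a 'disk' lock on the source
    #    and write the target config already carrying lock => 'create':
    PVE::LXC::Config->lock_config($vmid, sub {
        my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
        # ... validate the source, build $newconf,
        #     set $newconf->{lock} = 'create',
        #     PVE::LXC::Config->write_config($newid, $newconf) ...
    });

    # 2) The worker only touches the target while the 'create' lock is held:
    PVE::LXC::Config->lock_config($newid, sub {
        my $conf = PVE::LXC::Config->load_config($newid);
        die "Lost 'create' config lock, aborting.\n"
            if !PVE::LXC::Config->has_lock($conf, 'create');
        # ... $conf->{$key} = $value; write_config($newid, $conf) ...
    });

    # 3) Finally the worker drops the 'create' lock on success, and the
    #    source 'disk' lock in any case:
    PVE::LXC::Config->remove_lock($newid, 'create');
    PVE::LXC::Config->remove_lock($vmid, 'disk');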
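PVE::LXC::copy_volume() is not defined in this diff and presumably comes with
a companion pve-container patch. Judging only from the call site above, a
full-clone helper would have roughly the following shape; this is a
hypothetical sketch, and the vdisk_alloc() arguments and the copy step are
assumptions, not the actual implementation:

    sub copy_volume {
        my ($mp, $newid, $storage, $storecfg, $conf, $snapname) = @_;

        # Allocate a fresh volume for the new CT on the target storage;
        # vdisk_alloc() expects the size in KiB, $mp->{size} is in bytes.
        my $newvolid = PVE::Storage::vdisk_alloc(
            $storecfg, $storage, $newid, 'raw', undef, $mp->{size} / 1024);

        # Mount the source mountpoint (at $snapname if given) and the new
        # volume, copy the contents over (e.g. with rsync), and return the
        # new volume ID so the caller can record it for error cleanup.
        # ...
        return $newvolid;
    }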