[pve-devel] [PATCH v2 2/3] remove unnecessary eslint.js

2021-07-19 Thread Dominik Csapak
Signed-off-by: Dominik Csapak 
---
 src/eslint.js | 138871 ---
 1 file changed, 138871 deletions(-)
 delete mode 100755 src/eslint.js

diff --git a/src/eslint.js b/src/eslint.js
deleted file mode 100755
index 6115dea..000
-- 
2.30.2






[pve-devel] [PATCH v2 3/3] use worker_threads for linting

2021-07-19 Thread Dominik Csapak
instead of linting all files in the main thread, use worker threads
(4 by default) and add the '-t' switch to control that count

a basic benchmark of eslint of pve-manager showed some performance
gains:

Benchmark #1: Current
  Time (mean ± σ):      6.468 s ±  0.116 s    [User: 9.803 s, System: 0.333 s]
  Range (min … max):    6.264 s …  6.647 s    10 runs

Benchmark #2: 2Threads
  Time (mean ± σ):      4.509 s ±  0.106 s    [User: 12.706 s, System: 0.530 s]
  Range (min … max):    4.335 s …  4.674 s    10 runs

Benchmark #3: 4Threads
  Time (mean ± σ):      3.471 s ±  0.033 s    [User: 16.390 s, System: 0.630 s]
  Range (min … max):    3.431 s …  3.542 s    10 runs

Benchmark #4: 8Threads
  Time (mean ± σ):      2.880 s ±  0.044 s    [User: 22.454 s, System: 0.938 s]
  Range (min … max):    2.813 s …  2.964 s    10 runs

Summary
  '8Threads' ran
1.21 ± 0.02 times faster than '4Threads'
1.57 ± 0.04 times faster than '2Threads'
2.25 ± 0.05 times faster than 'Current'

beyond 8 threads there were no real performance gains, since the
overhead of loading the module seems to be the biggest factor.

Signed-off-by: Dominik Csapak 
---
 src/bin/app.js| 35 +--
 src/index.js  |  2 ++
 src/lib/worker.js | 27 +++
 src/package.json  |  3 ++-
 4 files changed, 60 insertions(+), 7 deletions(-)
 create mode 100644 src/lib/worker.js

diff --git a/src/bin/app.js b/src/bin/app.js
index 8a28923..10e7e6a 100644
--- a/src/bin/app.js
+++ b/src/bin/app.js
@@ -6,6 +6,7 @@
 const path = require('path');
 const color = require('colors');
 const program = require('commander');
+const worker = require('worker_threads');
 const eslint = require('pve-eslint');
 
 program
@@ -14,6 +15,7 @@ program
.option('-e, --extend ', 'uses  ontop of default eslint config.')
.option('-f, --fix', 'if set, fixes will be applied.')
.option('-s, --strict', 'if set, also exit uncleanly on warnings')
+.option('-t, --threads ', 'how many worker_threads should be used (default=4)')
.option('--output-config', 'if set, only output the config as JSON and exit.')
 ;
 
@@ -42,6 +44,11 @@ if (!paths.length) {
 paths = [process.cwd()];
 }
 
+let threadCount = 4;
+if (program.threads) {
+threadCount = program.threads;
+}
+
 const defaultConfig = {
 parserOptions: {
ecmaVersion: 2020,
@@ -283,20 +290,36 @@ if (program.outputConfig) {
 process.exit(0);
 }
 
-const cli = new eslint.CLIEngine({
+const cliOptions = {
 baseConfig: config,
 useEslintrc: true,
 fix: !!program.fix,
 cwd: process.cwd(),
-});
+};
+
+let promises = [];
+let filesPerThread = Math.round(paths.length / threadCount);
+for (let i = 0; i < (threadCount - 1); i++) {
+let files = paths.splice(0, filesPerThread);
+promises.push(eslint.createWorker({
+   cliOptions,
+   files
+}));
+}
+
+// the remaining paths
+promises.push(eslint.createWorker({
+cliOptions,
+files: paths
+}));
 
-const report = cli.executeOnFiles(paths);
+let results = (await Promise.all(promises)).map(res => res.results).flat(1);
 
 let exitcode = 0;
 let files_err = [], files_warn = [], files_ok = [];
 let fixes = 0;
 console.log('');
-report.results.forEach(function(result) {
+results.forEach(function(result) {
 let filename = path.relative(process.cwd(), result.filePath);
 let msgs = result.messages;
 let max_sev = 0;
@@ -348,7 +371,7 @@ report.results.forEach(function(result) {
 
console.log('');
 });
 
-if (report.results.length > 1) {
+if (results.length > 1) {
 console.log(`${color.bold(files_ok.length + files_err.length)} files:`);
 if (files_err.length > 0) {
console.log(color.red(` ${color.bold(files_err.length)} files have Errors`));
@@ -367,7 +390,7 @@ 
console.log('');
 if (program.fix) {
 if (fixes > 0) {
console.log(`Writing ${color.bold(fixes)} fixed files...`);
-   eslint.CLIEngine.outputFixes(report);
+   eslint.CLIEngine.outputFixes({ results });
console.log('Done');
 } else {
console.log("No fixable Errors/Warnings found.");
diff --git a/src/index.js b/src/index.js
index 01b9a1d..311ae38 100644
--- a/src/index.js
+++ b/src/index.js
@@ -1,3 +1,5 @@
 const eslint = require('./lib/eslint.js');
+const createWorker = require('./lib/worker.js');
 
 module.exports = eslint;
+module.exports.createWorker = createWorker;
diff --git a/src/lib/worker.js b/src/lib/worker.js
new file mode 100644
index 000..9a8c955
--- /dev/null
+++ b/src/lib/worker.js
@@ -0,0 +1,27 @@
+'use strict';
+
+const worker = require('worker_threads');
+
+if (!worker.isMainThread) {
+const eslint = require('pve-eslint');
+const data = worker.workerData;
+const cli = new eslint.CLIEngine(data.cliOptions);
+c
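
The worker.js hunk is cut off above. For reference, a complete worker
module along these lines would look roughly like the following — a
sketch based on the visible parts and the app.js changes above, not
necessarily the exact code from the patch; the promise wiring and error
handling in createWorker are assumptions:

    'use strict';

    const worker = require('worker_threads');

    if (!worker.isMainThread) {
        // worker side: lint the assigned files and send the report back
        const eslint = require('pve-eslint');
        const data = worker.workerData;
        const cli = new eslint.CLIEngine(data.cliOptions);
        const report = cli.executeOnFiles(data.files);
        worker.parentPort.postMessage(report);
    }

    // main-thread side: spawn this file as a worker and resolve with its report
    function createWorker(data) {
        return new Promise((resolve, reject) => {
            const w = new worker.Worker(__filename, { workerData: data });
            w.on('message', resolve);
            w.on('error', reject);
            w.on('exit', code => {
                if (code !== 0) {
                    reject(new Error(`worker exited with code ${code}`));
                }
            });
        });
    }

    module.exports = createWorker;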

[pve-devel] [PATCH v2 1/3] ship proper nodejs module 'pve-eslint'

2021-07-19 Thread Dominik Csapak
instead of concatenating the eslint module into our app.js, ship
a 'pve-eslint' module that exports the built eslint module

to do this, we have to leave the module type on 'umd' instead of
changing to 'var' so that nodejs can properly import it.

Signed-off-by: Dominik Csapak 
---
 Makefile|  2 +-
 debian/control  |  7 +--
 debian/dirs |  1 +
 debian/links|  1 +
 debian/rules|  5 -
 patches/0001-adapt-webpack-config.patch | 19 +--
 src/Makefile| 15 ---
 src/{ => bin}/app.js|  5 -
 src/index.js|  3 +++
 src/package.json|  9 +
 10 files changed, 33 insertions(+), 34 deletions(-)
 create mode 100644 debian/dirs
 create mode 100644 debian/links
 delete mode 100644 src/Makefile
 rename src/{ => bin}/app.js (99%)
 create mode 100644 src/index.js
 create mode 100644 src/package.json

diff --git a/Makefile b/Makefile
index 9dbe3d0..2ac 100644
--- a/Makefile
+++ b/Makefile
@@ -49,7 +49,7 @@ download:
 # NOTE: needs npm installed, downloads packages from npm
 .PHONY: buildupstream
 buildupstream: ${BUILDSRC}
-   cp ${BUILDSRC}/build/eslint.js ${SRCDIR}/eslint.js
+   cp ${BUILDSRC}/build/eslint.js ${SRCDIR}/lib/eslint.js
 
 ${BUILDSRC}: ${UPSTREAM} patches
rm -rf $@
diff --git a/debian/control b/debian/control
index 3f9b014..7ea3664 100644
--- a/debian/control
+++ b/debian/control
@@ -2,13 +2,16 @@ Source: pve-eslint
 Section: devel
 Priority: optional
 Maintainer: Proxmox Support Team 
-Build-Depends: debhelper (>= 12~)
+Build-Depends: debhelper (>= 12~),
+   nodejs,
+   pkg-js-tools (>= 0.8.11)
 Standards-Version: 4.3.0
 Homepage: http://www.proxmox.com
 
 Package: pve-eslint
 Architecture: all
-Depends: node-commander, node-colors, nodejs, ${misc:Depends},
+Depends: node-commander, node-colors, nodejs (>= ${nodejs:Version}), ${misc:Depends},
+Provides: ${nodejs:Provides}
 Description: ESLint for Proxmox Virtual Environment development
  This package contains a version of eslint used to develop the
  Proxmox Virtual Environment, and other Proxmox projects, web GUI.
diff --git a/debian/dirs b/debian/dirs
new file mode 100644
index 000..e772481
--- /dev/null
+++ b/debian/dirs
@@ -0,0 +1 @@
+usr/bin
diff --git a/debian/links b/debian/links
new file mode 100644
index 000..99342ed
--- /dev/null
+++ b/debian/links
@@ -0,0 +1 @@
+usr/share/nodejs/pve-eslint/bin/app.js usr/bin/eslint
diff --git a/debian/rules b/debian/rules
index 2d33f6a..b4c4090 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,4 +1,7 @@
 #!/usr/bin/make -f
 
 %:
-   dh $@
+   dh $@ --with nodejs
+
+execute_after_dh_fixperms:
+   chmod --recursive a+x -- debian/*/usr/share/nodejs/pve-eslint/bin/*
diff --git a/patches/0001-adapt-webpack-config.patch b/patches/0001-adapt-webpack-config.patch
index 4698e74..b0201e1 100644
--- a/patches/0001-adapt-webpack-config.patch
+++ b/patches/0001-adapt-webpack-config.patch
@@ -3,21 +3,20 @@ From: Dominik Csapak 
 Date: Thu, 2 Apr 2020 07:10:18 +
 Subject: [PATCH] adapt webpack config
 
-changes to 'var' from 'umd' since we want to use it in the same file
 adds 'cli-engine' to build (we use it in our wrapper)
 and target 'node' since we will use it on the cli
 
 Signed-off-by: Dominik Csapak 
 ---
- webpack.config.js | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
+ webpack.config.js | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
 
 diff --git a/webpack.config.js b/webpack.config.js
-index 29d60cb4..95027075 100644
+index a22c99b..9209159 100644
 --- a/webpack.config.js
 +++ b/webpack.config.js
-@@ -2,14 +2,14 @@
- 
+@@ -4,8 +4,9 @@ const NodePolyfillPlugin = require("node-polyfill-webpack-plugin");
+ /** @type {import("webpack").Configuration} */
  module.exports = {
  mode: "none",
 +target: "node",
@@ -27,13 +26,5 @@ index 29d60cb4..95027075 100644
  },
  output: {
  filename: "[name].js",
- library: "[name]",
--libraryTarget: "umd",
--globalObject: "this"
-+libraryTarget: "var"
- },
- module: {
- rules: [
 -- 
 2.20.1
-
diff --git a/src/Makefile b/src/Makefile
deleted file mode 100644
index bef1c57..000
diff --git a/src/app.js b/src/bin/app.js
similarity index 99%
rename from src/app.js
rename to src/bin/app.js
index 9226234..8a28923 100644
--- a/src/app.js
+++ b/src/bin/app.js
@@ -1,9 +1,12 @@
-(function() {
+#!/usr/bin/env node
+
+(async function(){
 'use strict';
 
 const path = require('path');
 const color = require('colors');
 const program = require('commander');
+const eslint = require('pve-eslint');
 
 program
 .usage('[options] []')
diff --git a/src/index.js b/src/index.js
new file mode 100644
index 000..01b9a1d
--- /dev/null
+++ b/src/index.js
@@ -0,0 +1,3 @@
+const eslint = require('./lib/eslint.js');
+
+module.exports = eslint;
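
With this in place, the wrapper (and any other nodejs code) can consume
the built linter as a regular module. An illustrative use, with a
made-up file name:

    const eslint = require('pve-eslint');

    // CLIEngine is included in the webpack build (see the patched config)
    const cli = new eslint.CLIEngine({
        useEslintrc: true,
        cwd: process.cwd(),
    });

    const report = cli.executeOnFiles(['src/some-file.js']);
    console.log(`${report.errorCount} errors, ${report.warningCount} warnings`);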

[pve-devel] [PATCH v2 0/3] make linting threaded

2021-07-19 Thread Dominik Csapak
NOTE: this series will not build until a 'make buildupstream' is
executed (and 'src/lib/eslint.js' should be committed as well). I
did not send that file because it would be too big.

this series converts the package into a proper nodejs module
'pve-eslint', and adds threading to the linting binary

changes from v1:
* convert to nodejs module
* split worker code to own file
* drop hacky self-loading script, and use the 'pve-eslint' module instead

Dominik Csapak (3):
  ship proper nodejs module 'pve-eslint'
  remove unnecessary eslint.js
  use worker_threads for linting

 Makefile|  2 +-
 debian/control  |  7 +-
 debian/dirs |  1 +
 debian/links|  1 +
 debian/rules|  5 +-
 patches/0001-adapt-webpack-config.patch | 19 +-
 src/Makefile| 15 -
 src/{ => bin}/app.js| 40 +-
 src/eslint.js   | 138871 -
 src/index.js|  5 +
 src/lib/worker.js   | 27 +
 src/package.json| 10 +
 12 files changed, 92 insertions(+), 138911 deletions(-)
 create mode 100644 debian/dirs
 create mode 100644 debian/links
 delete mode 100644 src/Makefile
 rename src/{ => bin}/app.js (92%)
 delete mode 100755 src/eslint.js
 create mode 100644 src/index.js
 create mode 100644 src/lib/worker.js
 create mode 100644 src/package.json

-- 
2.30.2






[pve-devel] [PATCH pve-docs] fix typo: operation system -> operating system

2021-07-19 Thread Dylan Whyte
Signed-off-by: Dylan Whyte 
---
 local-lvm.adoc | 2 +-
 qm.adoc| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/local-lvm.adoc b/local-lvm.adoc
index 2489b9d..6abdee8 100644
--- a/local-lvm.adoc
+++ b/local-lvm.adoc
@@ -33,7 +33,7 @@ VG:
   swap pve  -wi-ao 896.00m 
 
 
-root:: Formatted as `ext4`, and contains the operation system.
+root:: Formatted as `ext4`, and contains the operating system.
 
 swap:: Swap partition
 
diff --git a/qm.adoc b/qm.adoc
index ba303fd..c291cb0 100644
--- a/qm.adoc
+++ b/qm.adoc
@@ -1108,7 +1108,7 @@ Copies and Clones
 [thumbnail="screenshot/gui-qemu-full-clone.png"]
 
 VM installation is usually done using an installation media (CD-ROM)
-from the operation system vendor. Depending on the OS, this can be a
+from the operating system vendor. Depending on the OS, this can be a
 time consuming task one might want to avoid.
 
 An easy way to deploy many VMs of the same type is to copy an existing
-- 
2.30.2






[pve-devel] [PATCH manager] vzdump: allow backups on btrfs

2021-07-19 Thread Oguz Bektas
see forum post [0]

[0]:
https://forum.proxmox.com/threads/cant-use-storage-type-btrfs-for-backup-500-help.92918/

Signed-off-by: Oguz Bektas 
---
 PVE/VZDump.pm | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/PVE/VZDump.pm b/PVE/VZDump.pm
index 46cb9e6d..39b5e46d 100644
--- a/PVE/VZDump.pm
+++ b/PVE/VZDump.pm
@@ -102,7 +102,8 @@ sub storage_info {
 
 die "can't use storage type '$type' for backup\n"
if (!($type eq 'dir' || $type eq 'nfs' || $type eq 'glusterfs'
- || $type eq 'cifs' || $type eq 'cephfs' || $type eq 'pbs'));
+ || $type eq 'cifs' || $type eq 'cephfs' || $type eq 'pbs'
+ || $type eq 'btrfs'));
 die "can't use storage '$storage' for backups - wrong content type\n"
if (!$scfg->{content}->{backup});
 
-- 
2.30.2






Re: [pve-devel] [PATCH manager] vzdump: allow backups on btrfs

2021-07-19 Thread Thomas Lamprecht
On 19.07.21 13:58, Oguz Bektas wrote:
> see forum post [0]
> 
> [0]:
> https://forum.proxmox.com/threads/cant-use-storage-type-btrfs-for-backup-500-help.92918/
> 
> Signed-off-by: Oguz Bektas 
> ---
>  PVE/VZDump.pm | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/PVE/VZDump.pm b/PVE/VZDump.pm
> index 46cb9e6d..39b5e46d 100644
> --- a/PVE/VZDump.pm
> +++ b/PVE/VZDump.pm
> @@ -102,7 +102,8 @@ sub storage_info {
>  
>  die "can't use storage type '$type' for backup\n"
>   if (!($type eq 'dir' || $type eq 'nfs' || $type eq 'glusterfs'
> -   || $type eq 'cifs' || $type eq 'cephfs' || $type eq 'pbs'));
> +   || $type eq 'cifs' || $type eq 'cephfs' || $type eq 'pbs'
> +   || $type eq 'btrfs'));

Rather, that whole check above needs to be removed; we can already
derive this from $scfg->{content}->{backup} below. That can only get
set on a storage that declares support for backups, and it also works
for external storage plugins.

>  die "can't use storage '$storage' for backups - wrong content type\n"
>   if (!$scfg->{content}->{backup});
>  
> 






[pve-devel] [PATCH v2 manager] vzdump: allow backups from plugins

2021-07-19 Thread Oguz Bektas
remove the type check for specific plugins; instead, we can deduce
support from the content types in the storage config (backup content
can only be set on storages that declare support for backups). This
should also work with external storage plugins.

Signed-off-by: Oguz Bektas 
---
v1->v2:
* remove type check instead of adding btrfs to it

 PVE/VZDump.pm | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/PVE/VZDump.pm b/PVE/VZDump.pm
index 46cb9e6d..b671ab19 100644
--- a/PVE/VZDump.pm
+++ b/PVE/VZDump.pm
@@ -100,9 +100,6 @@ sub storage_info {
 my $scfg = PVE::Storage::storage_config($cfg, $storage);
 my $type = $scfg->{type};
 
-die "can't use storage type '$type' for backup\n"
-   if (!($type eq 'dir' || $type eq 'nfs' || $type eq 'glusterfs'
- || $type eq 'cifs' || $type eq 'cephfs' || $type eq 'pbs'));
 die "can't use storage '$storage' for backups - wrong content type\n"
if (!$scfg->{content}->{backup});
 
-- 
2.30.2






[pve-devel] [PATCH v1 storage 1/9] storage: expose find_free_diskname

2021-07-19 Thread Aaron Lauterer
We do not expose the parameter 'add_fmt_suffix' used by the internal
implementation of 'find_free_diskname'. This is something only the
plugins themselves know; it cannot be determined easily and reliably
by an outside caller.

This is why the new 'wants_fmt_suffix' method has been introduced. For
most plugins the return value is very clear. For the default
implementation in Plugin.pm we add another check to be on the safe side
and only return true if the '$scfg->{path}' option is present.
It indicates that the volume in question is an actual file which will
need the suffix.

Signed-off-by: Aaron Lauterer 
---
rfc -> v1:
dropped $add_fmt_suffix parameter and added the "wants_fmt_suffix"
helper method in each plugin.
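
To illustrate the resulting call from outside the storage layer (a
sketch; the storage ID and VMID are made up):

    my $cfg = PVE::Storage::config();

    # the plugin decides via wants_fmt_suffix() whether the returned name
    # carries a format suffix, e.g. "vm-100-disk-1" on LVM/ZFS/RBD vs.
    # "vm-100-disk-1.raw" on a path-based storage
    my $name = PVE::Storage::find_free_diskname($cfg, 'local-lvm', 100, 'raw');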

 PVE/Storage.pm   | 11 +++
 PVE/Storage/LVMPlugin.pm |  5 +
 PVE/Storage/Plugin.pm|  7 +++
 PVE/Storage/RBDPlugin.pm |  5 +
 PVE/Storage/ZFSPoolPlugin.pm |  5 +
 5 files changed, 33 insertions(+)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index c04b5a2..afeb2e3 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -203,6 +203,17 @@ sub storage_can_replicate {
 return $plugin->storage_can_replicate($scfg, $storeid, $format);
 }
 
+sub find_free_diskname {
+my ($cfg, $storeid, $vmid, $fmt) = @_;
+
+my $scfg = storage_config($cfg, $storeid);
+my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+
+my $add_fmt_suffix = $plugin->wants_fmt_suffix($scfg);
+
+return $plugin->find_free_diskname($storeid, $scfg, $vmid, $fmt, $add_fmt_suffix);
+}
+
 sub storage_ids {
 my ($cfg) = @_;
 
diff --git a/PVE/Storage/LVMPlugin.pm b/PVE/Storage/LVMPlugin.pm
index 139d391..3e5b6c8 100644
--- a/PVE/Storage/LVMPlugin.pm
+++ b/PVE/Storage/LVMPlugin.pm
@@ -201,6 +201,11 @@ sub type {
 return 'lvm';
 }
 
+sub wants_fmt_suffix {
+my ($class, $scfg) = @_;
+return 0;
+}
+
 sub plugindata {
 return {
content => [ {images => 1, rootdir => 1}, { images => 1 }],
diff --git a/PVE/Storage/Plugin.pm b/PVE/Storage/Plugin.pm
index b1865cb..5c6c659 100644
--- a/PVE/Storage/Plugin.pm
+++ b/PVE/Storage/Plugin.pm
@@ -191,6 +191,13 @@ sub default_format {
 return wantarray ? ($def_format, $valid_formats) : $def_format;
 }
 
+sub wants_fmt_suffix {
+my ($class, $scfg) = @_;
+return 1 if $scfg->{path};
+return 0;
+}
+
+
 PVE::JSONSchema::register_format('pve-storage-path', \&verify_path);
 sub verify_path {
 my ($path, $noerr) = @_;
diff --git a/PVE/Storage/RBDPlugin.pm b/PVE/Storage/RBDPlugin.pm
index a8d1243..86ea45a 100644
--- a/PVE/Storage/RBDPlugin.pm
+++ b/PVE/Storage/RBDPlugin.pm
@@ -273,6 +273,11 @@ sub type {
 return 'rbd';
 }
 
+sub wants_fmt_suffix {
+my ($class, $scfg) = @_;
+return 0;
+}
+
 sub plugindata {
 return {
content => [ {images => 1, rootdir => 1}, { images => 1 }],
diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index c4be70f..85e2211 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -18,6 +18,11 @@ sub type {
 return 'zfspool';
 }
 
+sub wants_fmt_suffix {
+my ($class, $scfg) = @_;
+return 0;
+}
+
 sub plugindata {
 return {
content => [ {images => 1, rootdir => 1}, {images => 1 , rootdir => 1}],
-- 
2.30.2






[pve-devel] [PATCH v1 container 9/9] api: move-volume: cleanup very long lines

2021-07-19 Thread Aaron Lauterer
Signed-off-by: Aaron Lauterer 
---

 src/PVE/API2/LXC.pm | 33 +++--
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index 0af22c1..fecd4ca 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -1820,13 +1820,15 @@ __PACKAGE__->register_method({
}),
delete => {
type => 'boolean',
-   description => "Delete the original volume after successful copy. By default the original is kept as an unused volume entry.",
+   description => "Delete the original volume after successful copy. By default the " .
+       "original is kept as an unused volume entry.",
optional => 1,
default => 0,
},
digest => {
type => 'string',
-   description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+   description => "Prevent changes if current configuration file has different SHA1 " .
+       "digest. This can be used to prevent concurrent modifications.",
maxLength => 40,
optional => 1,
},
@@ -1909,7 +1911,11 @@ __PACKAGE__->register_method({
 
my $storage_realcmd = sub {
eval {
-   PVE::Cluster::log_msg('info', $authuser, "move volume CT $vmid: move --volume $mpkey --storage $storage");
+   PVE::Cluster::log_msg(
+       'info',
+       $authuser,
+       "move volume CT $vmid: move --volume $mpkey --storage $storage"
+   );
 
my $conf = PVE::LXC::Config->load_config($vmid);
my $storage_cfg = PVE::Storage::config();
@@ -1920,8 +1926,20 @@ __PACKAGE__->register_method({
PVE::Storage::activate_volumes($storage_cfg, [ $old_volid ]);
my $bwlimit = extract_param($param, 'bwlimit');
my $source_storage = PVE::Storage::parse_volume_id($old_volid);
-   my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$source_storage, $storage], $bwlimit);
-   $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf, undef, $movelimit);
+   my $movelimit = PVE::Storage::get_bandwidth_limit(
+   'move',
+   [$source_storage, $storage],
+   $bwlimit
+   );
+   $new_volid = PVE::LXC::copy_volume(
+   $mpdata,
+   $vmid,
+   $storage,
+   $storage_cfg,
+   $conf,
+   undef,
+   $movelimit
+   );
if (PVE::LXC::Config->is_template($conf)) {
PVE::Storage::activate_volumes($storage_cfg, [ $new_volid ]);
my $template_volid = PVE::Storage::vdisk_create_base($storage_cfg, $new_volid);
@@ -1935,7 +1953,10 @@ __PACKAGE__->register_method({
$conf = PVE::LXC::Config->load_config($vmid);
PVE::Tools::assert_if_modified($digest, $conf->{digest});

-   $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint($mpdata, $mpkey eq 'rootfs');
+   $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint(
+   $mpdata,
+   $mpkey eq 'rootfs'
+   );

PVE::LXC::Config->add_unused_volume($conf, $old_volid) if !$param->{delete};
 
-- 
2.30.2






[pve-devel] [PATCH v1 container 7/9] cli: pct: change move_volume to move-volume

2021-07-19 Thread Aaron Lauterer
also add alias to keep move_volume working

Signed-off-by: Aaron Lauterer 
---
 src/PVE/CLI/pct.pm | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/PVE/CLI/pct.pm b/src/PVE/CLI/pct.pm
index 8c40bbe..7ac5a55 100755
--- a/src/PVE/CLI/pct.pm
+++ b/src/PVE/CLI/pct.pm
@@ -849,7 +849,8 @@ our $cmddef = {
 
 clone => [ "PVE::API2::LXC", 'clone_vm', ['vmid', 'newid'], { node => 
$nodename }, $upid_exit ],
 migrate => [ "PVE::API2::LXC", 'migrate_vm', ['vmid', 'target'], { node => 
$nodename }, $upid_exit],
-move_volume => [ "PVE::API2::LXC", 'move_volume', ['vmid', 'volume', 'storage'], { node => $nodename }, $upid_exit ],
+'move-volume' => [ "PVE::API2::LXC", 'move_volume', ['vmid', 'volume', 'storage'], { node => $nodename }, $upid_exit ],
+move_volume => { alias => 'move-volume' },
 
 snapshot => [ "PVE::API2::LXC::Snapshot", 'snapshot', ['vmid', 'snapname'], { node => $nodename } , $upid_exit ],
 delsnapshot => [ "PVE::API2::LXC::Snapshot", 'delsnapshot', ['vmid', 'snapname'], { node => $nodename } , $upid_exit ],
-- 
2.30.2






[pve-devel] [PATCH v1 qemu-server 4/9] Drive: add valid_drive_names_with_unused

2021-07-19 Thread Aaron Lauterer
Signed-off-by: Aaron Lauterer 
---
 PVE/QemuServer/Drive.pm | 4 
 1 file changed, 4 insertions(+)

diff --git a/PVE/QemuServer/Drive.pm b/PVE/QemuServer/Drive.pm
index 5110190..09f37c1 100644
--- a/PVE/QemuServer/Drive.pm
+++ b/PVE/QemuServer/Drive.pm
@@ -393,6 +393,10 @@ sub valid_drive_names {
 'efidisk0');
 }
 
+sub valid_drive_names_with_unused {
+return (valid_drive_names(), map {"unused$_"} (0 .. ($MAX_UNUSED_DISKS -1)));
+}
+
 sub is_valid_drivename {
 my $dev = shift;
 
-- 
2.30.2






[pve-devel] [PATCH v1 qemu-server 3/9] cli: qm: change move_disk to move-disk

2021-07-19 Thread Aaron Lauterer
also add alias to keep move_disk working.

Signed-off-by: Aaron Lauterer 
---
 PVE/CLI/qm.pm | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index 8307dc1..ef99b6d 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -910,7 +910,8 @@ our $cmddef = {
 
resize => [ "PVE::API2::Qemu", 'resize_vm', ['vmid', 'disk', 'size'], { node => $nodename } ],
 
-move_disk => [ "PVE::API2::Qemu", 'move_vm_disk', ['vmid', 'disk', 'storage'], { node => $nodename }, $upid_exit ],
+'move-disk' => [ "PVE::API2::Qemu", 'move_vm_disk', ['vmid', 'disk', 'storage'], { node => $nodename }, $upid_exit ],
+move_disk => { alias => 'move-disk' },
 
 unlink => [ "PVE::API2::Qemu", 'unlink', ['vmid'], { node => $nodename } ],
 
-- 
2.30.2






[pve-devel] [PATCH v1 qemu-server 6/9] api: move-disk: cleanup very long lines

2021-07-19 Thread Aaron Lauterer
Signed-off-by: Aaron Lauterer 
---
 PVE/API2/Qemu.pm | 25 -
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index ed1179b..0529c1b 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -3301,13 +3301,15 @@ __PACKAGE__->register_method({
 },
delete => {
type => 'boolean',
-   description => "Delete the original disk after successful copy. By default the original disk is kept as unused disk.",
+   description => "Delete the original disk after successful copy. By default the " .
+       "original disk is kept as unused disk.",
optional => 1,
default => 0,
},
digest => {
type => 'string',
-   description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+   description => "Prevent changes if current configuration file has different SHA1 " .
+       "digest. This can be used to prevent concurrent modifications.",
maxLength => 40,
optional => 1,
},
@@ -3386,11 +3388,20 @@ __PACKAGE__->register_method({
 (!$format || !$oldfmt || $oldfmt eq $format);
 
# this only checks snapshots because $disk is passed!
-   my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
+   my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use(
+   $storecfg,
+   $conf,
+   $disk,
+   $old_volid
+   );
die "you can't move a disk with snapshots and delete the source\n"
if $snapshotted && $param->{delete};
 
-   PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
+   PVE::Cluster::log_msg(
+   'info',
+   $authuser,
+   "move disk VM $vmid: move --disk $disk --storage $storeid"
+   );
 
my $running = PVE::QemuServer::check_running($vmid);
 
@@ -3409,7 +3420,11 @@ __PACKAGE__->register_method({
if $snapshotted;
 
my $bwlimit = extract_param($param, 'bwlimit');
-   my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit);
+   my $movelimit = PVE::Storage::get_bandwidth_limit(
+   'move',
+   [$oldstoreid, $storeid],
+   $bwlimit
+   );
 
my $newdrive = PVE::QemuServer::clone_disk(
$storecfg,
-- 
2.30.2






[pve-devel] [PATCH v1 storage qemu-server container 0/9] move disk or volume to other guests

2021-07-19 Thread Aaron Lauterer
This is the continuation of 'disk-reassign' but instead of a separate
API endpoint we now follow the approach to make it part of the
'move-disk' and 'move-volume' endpoints for VMs and containers.

The main idea is to make it easy to move a disk/volume to another guest.
Currently this is a manual and error prone process that requires
knowledge of how PVE handles disks/volumes and the mapping which guest
they belong to.

With this, 'qm move-disk' and 'pct move-volume' are changed so that the
storage parameter is optional, as are the new target-vmid and
target-{disk,mp} parameters. Old calls that move the disk/volume to
another storage keep working. To move to another guest, the storage
needs to be omitted, as in the sketch below.
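
For example (hypothetical VMIDs, disk keys and storage names, using the
parameters described above):

    # still works: move a disk of VM 100 to another storage
    qm move-disk 100 scsi1 other-storage

    # new: reassign the disk to VM 101, storage omitted
    qm move-disk 100 scsi1 --target-vmid 101 --target-disk scsi2

    # container counterpart
    pct move-volume 200 mp0 --target-vmid 201 --target-mp mp1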

The major change since the last iteration as a dedicated API endpoint [0]
is that the storage layer only implements the renaming itself. The layer
above (qemu-server and pve-container) defines the name of the new
volume/disk. Therefore it was necessary to expose the
'find_free_diskname' function. The rename function on the storage layer
handles possible template references and the creation of the new volid,
as that is highly dependent on the actual storage.

The following storage types are implemented at the moment:
* dir based ones
* ZFS
* (thin) LVM
* Ceph RBD


Most parts of the disk-reassign code has been taken and moved into the
'move_disk' and 'move_volume' endpoints with conditional checking if the
reassign code or the move to other storage code is meant to run
depending on the given parameters.

Changes since the RFC [1]:
* added check if target guest is replicated and fail if storage does not
  support replication
* only pass minimum of needed parameters to the storage layer and infer
  other needed information from that
* lock storage and check if the volume already exists (handling a
  possible race condition between calling find_free_disk and the actual
  renaming)
* use a helper method to determine if the plugin needs the fmt suffix
  in the volume name
* getting format of the source and pass it to find_free_disk
* style fixes (long lines, multiline post-if, ...)

[0] https://lists.proxmox.com/pipermail/pve-devel/2021-April/047481.html
[1] https://lists.proxmox.com/pipermail/pve-devel/2021-June/048400.html

storage: Aaron Lauterer (2):
  storage: expose find_free_diskname
  add disk rename feature

 PVE/Storage.pm   | 31 +++--
 PVE/Storage/LVMPlugin.pm | 32 ++
 PVE/Storage/LvmThinPlugin.pm |  1 +
 PVE/Storage/Plugin.pm| 65 
 PVE/Storage/RBDPlugin.pm | 34 +++
 PVE/Storage/ZFSPoolPlugin.pm | 29 
 6 files changed, 190 insertions(+), 2 deletions(-)


qemu-server: Aaron Lauterer (4):
  cli: qm: change move_disk to move-disk
  Drive: add valid_drive_names_with_unused
  api: move-disk: add move to other VM
  api: move-disk: cleanup very long lines

 PVE/API2/Qemu.pm| 254 ++--
 PVE/CLI/qm.pm   |   3 +-
 PVE/QemuServer/Drive.pm |   4 +
 3 files changed, 250 insertions(+), 11 deletions(-)


container: Aaron Lauterer (3):
  cli: pct: change move_volume to move-volume
  api: move-volume: add move to another container
  api: move-volume: cleanup very long lines

 src/PVE/API2/LXC.pm | 303 
 src/PVE/CLI/pct.pm  |   3 +-
 2 files changed, 278 insertions(+), 28 deletions(-)


-- 
2.30.2






[pve-devel] [PATCH v1 storage 2/9] add disk rename feature

2021-07-19 Thread Aaron Lauterer
Functionality has been added for the following storage types:

* directory ones, based on the default implementation:
* directory
* NFS
* CIFS
* gluster
* ZFS
* (thin) LVM
* Ceph

A new feature `rename` has been introduced to mark which storage
plugins support the feature.

The storage API version (APIVER) and age (APIAGE) have been bumped.

The storage gets locked and each plugin checks if the target volume
already exists prior to renaming.
This is done because there could be a race condition from the time the
external caller requests a new free disk name to the time the volume is
actually renamed.

Signed-off-by: Aaron Lauterer 
---
rfc -> v1:
* reduced number of parameters to minimum needed, plugins infer needed
  information themselves
* added storage locking and checking if volume already exists
* parse target_volname prior to renaming to check if valid

old dedicated API endpoint -> rfc:
only do rename now but the rename function handles templates and returns
the new volid as this can be differently handled on some storages.
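
From a caller's perspective this looks like the following (a sketch
with made-up volids):

    my $cfg = PVE::Storage::config();

    # renames under the storage lock and returns the new volid,
    # e.g. "local-lvm:vm-101-disk-0"
    my $new_volid = PVE::Storage::rename_volume(
        $cfg, 'local-lvm:vm-100-disk-0', 'vm-101-disk-0');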

 PVE/Storage.pm   | 20 +++--
 PVE/Storage/LVMPlugin.pm | 27 +
 PVE/Storage/LvmThinPlugin.pm |  1 +
 PVE/Storage/Plugin.pm| 58 
 PVE/Storage/RBDPlugin.pm | 29 ++
 PVE/Storage/ZFSPoolPlugin.pm | 24 +++
 6 files changed, 157 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index afeb2e3..f6d86e1 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -41,11 +41,11 @@ use PVE::Storage::PBSPlugin;
 use PVE::Storage::BTRFSPlugin;
 
 # Storage API version. Increment it on changes in storage API interface.
-use constant APIVER => 9;
+use constant APIVER => 10;
 # Age is the number of versions we're backward compatible with.
 # This is like having 'current=APIVER' and age='APIAGE' in libtool,
# see https://www.gnu.org/software/libtool/manual/html_node/Libtool-versioning.html
-use constant APIAGE => 0;
+use constant APIAGE => 1;
 
 # load standard plugins
 PVE::Storage::DirPlugin->register();
@@ -360,6 +360,7 @@ sub volume_snapshot_needs_fsfreeze {
 #snapshot - taking a snapshot is possible
 #sparseinit - volume is sparsely initialized
 #template - conversion to base image is possible
+#rename - renaming volumes is possible
 # $snap - check if the feature is supported for a given snapshot
 # $running - if the guest owning the volume is running
 # $opts - hash with further options:
@@ -1868,6 +1869,21 @@ sub complete_volume {
 return $res;
 }
 
+sub rename_volume {
+my ($cfg, $source_volid, $target_volname) = @_;
+
+my ($storeid) = parse_volume_id($source_volid);
+
+activate_storage($cfg, $storeid);
+
+my $scfg = storage_config($cfg, $storeid);
+my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+
+return $plugin->cluster_lock_storage($storeid, $scfg->{shared}, undef, sub {
+   return $plugin->rename_volume($scfg, $storeid, $source_volid, $target_volname);
+});
+}
+
 # Various io-heavy operations require io/bandwidth limits which can be
 # configured on multiple levels: The global defaults in datacenter.cfg, and
 # per-storage overrides. When we want to do a restore from storage A to storage
diff --git a/PVE/Storage/LVMPlugin.pm b/PVE/Storage/LVMPlugin.pm
index 3e5b6c8..7a13a96 100644
--- a/PVE/Storage/LVMPlugin.pm
+++ b/PVE/Storage/LVMPlugin.pm
@@ -344,6 +344,16 @@ sub lvcreate {
 run_command($cmd, errmsg => "lvcreate '$vg/$name' error");
 }
 
+sub lvrename {
+my ($vg, $oldname, $newname) = @_;
+
+my $cmd = ['/sbin/lvrename', $vg, $oldname, $newname];
+run_command($cmd, errmsg => "lvrename '${vg}/${oldname}' to '${newname}' error");
+}
+
 sub alloc_image {
 my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
 
@@ -589,6 +599,7 @@ sub volume_has_feature {
 
 my $features = {
copy => { base => 1, current => 1},
+   rename => {current => 1},
 };
 
 my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
@@ -697,4 +708,20 @@ sub volume_import_write {
input => '<&'.fileno($input_fh));
 }
 
+sub rename_volume {
+my ($class, $scfg, $storeid, $source_volid, $target_volname) = @_;
+
+$class->parse_volname($target_volname);
+
+my (undef, $source_volname) = PVE::Storage::Plugin::parse_volume_id($source_volid);
+
+my $vg = $scfg->{vgname};
+my $lvs = lvm_list_volumes($vg);
+die "target volume '${target_volname}' already exists\n"
+   if ($lvs->{$vg}->{$target_volname});
+
+lvrename($scfg->{vgname}, $source_volname, $target_volname);
+return "${storeid}:${target_volname}";
+}
+
 1;
diff --git a/PVE/Storage/LvmThinPlugin.pm b/PVE/Storage/LvmThinPlugin.pm
index 4ba6f90..c24af22 100644
--- a/PVE/Storage/LvmThinPlugin.pm
+++ b/PVE/Storage/LvmThinPlugin.pm
@@ -355,6 +355,7 @@ sub volume_has_feature {
template => { current =

[pve-devel] [PATCH v1 qemu-server 5/9] api: move-disk: add move to other VM

2021-07-19 Thread Aaron Lauterer
The goal of this is to expand the move-disk API endpoint to make it
possible to move a disk to another VM. Previously this was only possible
with manual intervention, either by renaming the VM disk or by manually
adding the disk's volid to the config of the other VM.

Signed-off-by: Aaron Lauterer 
---
rfc -> v1:
* add check if target guest is replicated and fail if the moved volume
  does not support it
* check if source volume has a format suffix and pass it to
  'find_free_disk'
* fixed some style nits

old dedicated api endpoint -> rfc:
There are some big changes here. The old [0] dedicated API endpoint is
gone and most of its code is now part of move_disk. Error messages have
been changed accordingly and sometimes enhanced by adding disk keys and
VMIDs where appropriate.

Since a move to other guests should be possible for unused disks, we
need to check before doing a move to storage to make sure to not
handle unused disks.

[0] https://lists.proxmox.com/pipermail/pve-devel/2021-April/047738.html

 PVE/API2/Qemu.pm | 229 +--
 PVE/CLI/qm.pm|   2 +-
 2 files changed, 225 insertions(+), 6 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index f2557e3..ed1179b 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -35,6 +35,7 @@ use PVE::API2::Qemu::Agent;
 use PVE::VZDump::Plugin;
 use PVE::DataCenterConfig;
 use PVE::SSHInfo;
+use PVE::Replication;
 
 BEGIN {
 if (!$ENV{PVE_GENERATING_DOCS}) {
@@ -3263,9 +3264,11 @@ __PACKAGE__->register_method({
 method => 'POST',
 protected => 1,
 proxyto => 'node',
-description => "Move volume to different storage.",
+description => "Move volume to different storage or to a different VM.",
 permissions => {
-   description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, and 'Datastore.AllocateSpace' permissions on the storage.",
+   description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
+   "and 'Datastore.AllocateSpace' permissions on the storage. To move " .
+   "a disk to another VM, you need the permissions on the target VM as well.",
check => [ 'and',
   ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
   ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
@@ -3276,14 +3279,19 @@ __PACKAGE__->register_method({
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+   'target-vmid' => get_standard_option('pve-vmid', {
+   completion => \&PVE::QemuServer::complete_vmid,
+   optional => 1,
+   }),
disk => {
type => 'string',
description => "The disk you want to move.",
-   enum => [PVE::QemuServer::Drive::valid_drive_names()],
+   enum => [PVE::QemuServer::Drive::valid_drive_names_with_unused()],
},
 storage => get_standard_option('pve-storage-id', {
description => "Target storage.",
completion => \&PVE::QemuServer::complete_storage,
+   optional => 1,
 }),
 'format' => {
 type => 'string',
@@ -3310,6 +3318,20 @@ __PACKAGE__->register_method({
minimum => '0',
default => 'move limit from datacenter or storage config',
},
+   'target-disk' => {
+   type => 'string',
+   description => "The config key the disk will be moved to on the target VM " .
+       "(for example, ide0 or scsi1).",
+   enum => [PVE::QemuServer::Drive::valid_drive_names_with_unused()],
+   optional => 1,
+   },
+   'target-digest' => {
+   type => 'string',
+   description => "Prevent changes if current configuration file of the target VM has " .
+       "a different SHA1 digest. This can be used to prevent concurrent modifications.",
+   maxLength => 40,
+   optional => 1,
+   },
},
 },
 returns => {
@@ -3324,14 +3346,22 @@ __PACKAGE__->register_method({
 
my $node = extract_param($param, 'node');
my $vmid = extract_param($param, 'vmid');
+   my $target_vmid = extract_param($param, 'target-vmid');
my $digest = extract_param($param, 'digest');
+   my $target_digest = extract_param($param, 'target-digest');
my $disk = extract_param($param, 'disk');
+   my $target_disk = extract_param($param, 'target-disk');
my $storeid = extract_param($param, 'storage');
my $format = extract_param($param, 'format');
 
+   die "either set storage or target-vmid, but not both\n"
+   if $storeid && $target_vmid;
+
+
my $storecfg = PVE::Storage::config();
+   my $source_volid;
 
-   my $up

[pve-devel] [PATCH v1 container 8/9] api: move-volume: add move to another container

2021-07-19 Thread Aaron Lauterer
The goal of this is to expand the move-volume API endpoint to make it
possible to move a container volume / mountpoint to another container.

Currently it works for regular mountpoints, though it would be nice to
be able to do it for unused mountpoints as well.

Signed-off-by: Aaron Lauterer 
---
This is mostly the code from qemu-server with some adaptations, mainly
to error messages and some checks.

Previous checks have been moved to '$move_to_storage_checks'.

rfc -> v1:
* add check if target guest is replicated and fail if the moved volume
  does not support it
* check if source volume has a format suffix and pass it to
  'find_free_disk' or if the prefix is vm/subvol as those also have
  their own meaning, see the comment in the code
* fixed some style nits
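
As an illustration, the same operation via the API would look roughly
like this (hypothetical node name and CTIDs):

    # move mp0 of container 200 to container 201 as mp1
    pvesh create /nodes/localhost/lxc/200/move_volume \
        --volume mp0 --target-vmid 201 --target-mp mp1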

 src/PVE/API2/LXC.pm | 270 
 src/PVE/CLI/pct.pm  |   2 +-
 2 files changed, 250 insertions(+), 22 deletions(-)

diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index b929481..0af22c1 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -27,6 +27,8 @@ use PVE::API2::LXC::Snapshot;
 use PVE::JSONSchema qw(get_standard_option);
 use base qw(PVE::RESTHandler);
 
+use Data::Dumper;
+
 BEGIN {
 if (!$ENV{PVE_GENERATING_DOCS}) {
require PVE::HA::Env::PVE2;
@@ -1784,10 +1786,12 @@ __PACKAGE__->register_method({
 method => 'POST',
 protected => 1,
 proxyto => 'node',
-description => "Move a rootfs-/mp-volume to a different storage",
+description => "Move a rootfs-/mp-volume to a different storage or to a different container.",
 permissions => {
description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
-   "and 'Datastore.AllocateSpace' permissions on the storage.",
+   "and 'Datastore.AllocateSpace' permissions on the storage. To move " .
+   "a volume to another container, you need the permissions on the " .
+   "target container as well.",
check =>
[ 'and',
  ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
@@ -1799,14 +1803,20 @@ __PACKAGE__->register_method({
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+   'target-vmid' => get_standard_option('pve-vmid', {
+   completion => \&PVE::LXC::complete_ctid,
+   optional => 1,
+   }),
volume => {
type => 'string',
+   #TODO: check how to handle unused mount points as the mp parameter is not configured
enum => [ PVE::LXC::Config->valid_volume_keys() ],
description => "Volume which will be moved.",
},
storage => get_standard_option('pve-storage-id', {
description => "Target Storage.",
completion => \&PVE::Storage::complete_storage_enabled,
+   optional => 1,
}),
delete => {
type => 'boolean',
@@ -1827,6 +1837,20 @@ __PACKAGE__->register_method({
minimum => '0',
default => 'clone limit from datacenter or storage config',
},
+   'target-mp' => {
+   type => 'string',
+   description => "The config key the mp will be moved to.",
+   enum => [PVE::LXC::Config->valid_volume_keys()],
+   optional => 1,
+   },
+   'target-digest' => {
+   type => 'string',
+   description => "Prevent changes if current configuration file of the target " .
+       "container has a different SHA1 digest. This can be used to prevent " .
+       "concurrent modifications.",
+   maxLength => 40,
+   optional => 1,
+   },
},
 },
 returns => {
@@ -1841,32 +1865,49 @@ __PACKAGE__->register_method({
 
my $vmid = extract_param($param, 'vmid');
 
+   my $target_vmid = extract_param($param, 'target-vmid');
+
my $storage = extract_param($param, 'storage');
 
my $mpkey = extract_param($param, 'volume');
 
+   my $target_mp = extract_param($param, 'target-mp');
+
+   my $digest = extract_param($param, 'digest');
+
+   my $target_digest = extract_param($param, 'target-digest');
+
my $lockname = 'disk';
 
my ($mpdata, $old_volid);
 
-   PVE::LXC::Config->lock_config($vmid, sub {
-   my $conf = PVE::LXC::Config->load_config($vmid);
-   PVE::LXC::Config->check_lock($conf);
+   die "either set storage or target-vmid, but not both\n"
+   if $storage && $target_vmid;
 
-   die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid);
+   die "cannot move volumes of a running container\n" if 
PVE::LXC::check_running($vmid);
 
-   $mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey});
-  

[pve-devel] applied: [PATCH pve-docs] fix typo: operation system -> operating system

2021-07-19 Thread Thomas Lamprecht
On 19.07.21 13:41, Dylan Whyte wrote:
> Signed-off-by: Dylan Whyte 
> ---
>  local-lvm.adoc | 2 +-
>  qm.adoc| 2 +-
>  2 files changed, 2 insertions(+), 2 deletions(-)
> 
>

applied, thanks!





[pve-devel] applied: [PATCH proxmox-widget-toolkit] apt: match "Debian Backports" origin

2021-07-19 Thread Thomas Lamprecht
On 15.07.21 10:08, Fabian Grünbichler wrote:
> some users might have that enabled, and we know it is from Debian and
> not '?'
> 
> Signed-off-by: Fabian Grünbichler 
> ---
>  src/node/APTRepositories.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, thanks!




[pve-devel] applied: [PATCH widget-toolkit] api-viewer: drop extra slash in api path

2021-07-19 Thread Thomas Lamprecht
On 15.07.21 13:10, Dominik Csapak wrote:
> 'endpoint' already begins with a slash, so drop it after /api2/json
> 
> Signed-off-by: Dominik Csapak 
> ---
>  src/api-viewer/APIViewer.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, thanks!

