[pve-devel] [PATCH manager v2 2/2] ui: hide bulk migrate options on standalone nodes

2023-11-13 Thread Dominik Csapak
since there is nowhere to migrate to and we hide the regular migrate
buttons/options too.

Signed-off-by: Dominik Csapak 
---
changes from v1:
* use new 'isStandaloneNode()' from Utils

 www/manager6/node/CmdMenu.js | 4 ++++
 www/manager6/node/Config.js  | 1 +
 2 files changed, 5 insertions(+)

diff --git a/www/manager6/node/CmdMenu.js b/www/manager6/node/CmdMenu.js
index dc56ef08..1dbcd781 100644
--- a/www/manager6/node/CmdMenu.js
+++ b/www/manager6/node/CmdMenu.js
@@ -139,5 +139,9 @@ Ext.define('PVE.node.CmdMenu', {
if (me.pveSelNode.data.running) {
me.getComponent('wakeonlan').setDisabled(true);
}
+
+   if (PVE.Utils.isStandaloneNode()) {
+   me.getComponent('bulkmigrate').setVisible(false);
+   }
 },
 });
diff --git a/www/manager6/node/Config.js b/www/manager6/node/Config.js
index 6ed2172a..6deafcdf 100644
--- a/www/manager6/node/Config.js
+++ b/www/manager6/node/Config.js
@@ -69,6 +69,7 @@ Ext.define('PVE.node.Config', {
text: gettext('Bulk Migrate'),
iconCls: 'fa fa-fw fa-send-o',
disabled: !caps.vms['VM.Migrate'],
+   hidden: PVE.Utils.isStandaloneNode(),
handler: function() {
Ext.create('PVE.window.BulkAction', {
autoShow: true,
-- 
2.30.2






[pve-devel] [PATCH manager 1/2] ui: factor out standalone node check

2023-11-13 Thread Dominik Csapak
into Utils and use it where we manually checked that

Signed-off-by: Dominik Csapak 
---
I put it into Utils since I did not find a better place. I could have put it
in the ResourceStore, but coupling those things seemed wrong to me.

 www/manager6/Utils.js | 4 ++++
 www/manager6/form/ComboBoxSetStoreNode.js | 2 +-
 www/manager6/grid/Replication.js  | 4 ++--
 www/manager6/lxc/CmdMenu.js   | 2 +-
 www/manager6/lxc/Config.js| 2 +-
 www/manager6/menu/TemplateMenu.js | 2 +-
 www/manager6/qemu/CmdMenu.js  | 2 +-
 www/manager6/qemu/Config.js   | 2 +-
 www/manager6/storage/LVMEdit.js   | 2 +-
 9 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js
index be30393e..9b77ebd3 100644
--- a/www/manager6/Utils.js
+++ b/www/manager6/Utils.js
@@ -1921,6 +1921,10 @@ Ext.define('PVE.Utils', {
'ok': 2,
'__default__': 3,
 },
+
+isStandaloneNode: function() {
+   return PVE.data.ResourceStore.getNodes().length < 2;
+},
 },
 
 singleton: true,
diff --git a/www/manager6/form/ComboBoxSetStoreNode.js b/www/manager6/form/ComboBoxSetStoreNode.js
index d5695bad..26b1f95b 100644
--- a/www/manager6/form/ComboBoxSetStoreNode.js
+++ b/www/manager6/form/ComboBoxSetStoreNode.js
@@ -56,7 +56,7 @@ Ext.define('PVE.form.ComboBoxSetStoreNode', {
 initComponent: function() {
let me = this;
 
-   if (me.showNodeSelector && PVE.data.ResourceStore.getNodes().length > 1) {
+   if (me.showNodeSelector && !PVE.Utils.isStandaloneNode()) {
me.errorHeight = 140;
Ext.apply(me.listConfig ?? {}, {
tbar: {
diff --git a/www/manager6/grid/Replication.js b/www/manager6/grid/Replication.js
index 1e4e00fc..79824b9b 100644
--- a/www/manager6/grid/Replication.js
+++ b/www/manager6/grid/Replication.js
@@ -220,7 +220,7 @@ Ext.define('PVE.grid.ReplicaView', {
// currently replication is for cluster only, so disable the whole component for non-cluster
checkPrerequisites: function() {
let view = this.getView();
-   if (PVE.data.ResourceStore.getNodes().length < 2) {
+   if (PVE.Utils.isStandaloneNode()) {
view.mask(gettext("Replication needs at least two nodes"), ['pve-static-mask']);
}
},
@@ -450,7 +450,7 @@ Ext.define('PVE.grid.ReplicaView', {
 
// if we set the warning mask, we do not want to load
// or set the mask on store errors
-   if (PVE.data.ResourceStore.getNodes().length < 2) {
+   if (PVE.Utils.isStandaloneNode()) {
return;
}
 
diff --git a/www/manager6/lxc/CmdMenu.js b/www/manager6/lxc/CmdMenu.js
index 56f36b5e..b1403fc6 100644
--- a/www/manager6/lxc/CmdMenu.js
+++ b/www/manager6/lxc/CmdMenu.js
@@ -31,7 +31,7 @@ Ext.define('PVE.lxc.CmdMenu', {
};
 
let caps = Ext.state.Manager.get('GuiCap');
-   let standalone = PVE.data.ResourceStore.getNodes().length < 2;
+   let standalone = PVE.Utils.isStandaloneNode();
 
let running = false, stopped = true, suspended = false;
switch (info.status) {
diff --git a/www/manager6/lxc/Config.js b/www/manager6/lxc/Config.js
index 85d32e3c..4516ee8f 100644
--- a/www/manager6/lxc/Config.js
+++ b/www/manager6/lxc/Config.js
@@ -92,7 +92,7 @@ Ext.define('PVE.lxc.Config', {
var migrateBtn = Ext.create('Ext.Button', {
text: gettext('Migrate'),
disabled: !caps.vms['VM.Migrate'],
-   hidden: PVE.data.ResourceStore.getNodes().length < 2,
+   hidden: PVE.Utils.isStandaloneNode(),
handler: function() {
var win = Ext.create('PVE.window.Migrate', {
vmtype: 'lxc',
diff --git a/www/manager6/menu/TemplateMenu.js b/www/manager6/menu/TemplateMenu.js
index eb91481c..7cd87f6a 100644
--- a/www/manager6/menu/TemplateMenu.js
+++ b/www/manager6/menu/TemplateMenu.js
@@ -22,7 +22,7 @@ Ext.define('PVE.menu.TemplateMenu', {
me.title = (guestType === 'qemu' ? 'VM ' : 'CT ') + info.vmid;
 
let caps = Ext.state.Manager.get('GuiCap');
-   let standaloneNode = PVE.data.ResourceStore.getNodes().length < 2;
+   let standaloneNode = PVE.Utils.isStandaloneNode();
 
me.items = [
{
diff --git a/www/manager6/qemu/CmdMenu.js b/www/manager6/qemu/CmdMenu.js
index ccc5f74d..4f59d5f7 100644
--- a/www/manager6/qemu/CmdMenu.js
+++ b/www/manager6/qemu/CmdMenu.js
@@ -32,7 +32,7 @@ Ext.define('PVE.qemu.CmdMenu', {
};
 
let caps = Ext.state.Manager.get('GuiCap');
-   let standalone = PVE.data.ResourceStore.getNodes().length < 2;
+   let standalone = PVE.Utils.isStandaloneNode();
 
let running = false, stopped = true, suspended = false;
switch (info.status) {
diff --git a/www/manager6/qemu/Config.js b/www/manager6/qemu/Config.js
index 6acf589c..fb0d9cde 100644
--- a/www/manager6/qemu/Config.js
+

Re: [pve-devel] [PATCH qemu-server 1/1] fix #5033: api: Add target-filename option to move_disk reassign

2023-11-13 Thread Fabian Grünbichler
On November 10, 2023 2:07 pm, Fiona Ebner wrote:
> I'd rather go with your current approach than a new endpoint. Having
> said that, another issue is that custom naming is not a first-class
> feature currently, e.g. live-migration of a local disk or moving a disk
> to a different storage or restoring from a backup will just throw away
> the custom name. If we add more explicit support for custom names here,
> then it's natural that users expect it to "just work" in all scenarios.
> 
> So I feel like it's a question of whether we want to better support
> custom names in general.
> 
> There also is an open feature request to support comment fields for
> disks (and other hardware items):
> https://bugzilla.proxmox.com/show_bug.cgi?id=2672 and IMHO that would be
> a cleaner solution than encoding custom information in the disk name itself.
> 
> Opinions from other devs?

there have been similar requests in the past, my preference would be:
- expose "human readable" custom name more prominently in places where new
  volumes are created, not just on the CLI/API only storage layer
- keep the name on operations that copy/move a volume

if a conflict arises for the second part, we could simply bump the
number-suffix until we find a free "slot", just like we do now for
vm-XXX-disk-N. this would require a coordinated change through the
storage plugins, although it would be possible to fall back to the old
behaviour ("custom name" is lost) in PVE::Storage for (external) plugins
that don't support this feature/API (yet).
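
as a rough illustration of that bumping idea (a sketch only, not existing
PVE::Storage code; volume_exists() here is a placeholder helper):

    sub find_free_custom_volname {
        my ($storeid, $vmid, $label, $fmt) = @_;   # e.g. (..., 100, 'system', 'raw')
        for (my $n = 0; $n < 100; $n++) {
            my $name = "vm-$vmid-$label-$n.$fmt";
            return $name if !volume_exists($storeid, $name);
        }
        die "unable to find a free volume name for vm-$vmid-$label\n";
    }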

for doing a strict match/integration into external tooling, some sort of
comment or attribute would be better, as that could be easily preserved
on "move" type operations, and the external tooling would need to ensure
it is set for "create" type operations, and updated for "copy" type
operations if that is required (e.g., if it is supposed to be unique).

one could argue that a comment is all that is needed for human readable
labelling as well, but there are lots of people that *really* want

vm-XXX-system-0.raw and vm-XXX-data-0.raw (or 'db', or ..)

encoded directly in the volume (and thus file/..) name, since that is
then also visible on the storage layer (in PVE, but also directly when
doing disaster recovery/..), and I kind of see value in that as well.





[pve-devel] [RFC pve-network 2/9] dhcp : add|del_ip_mapping: only add|del dhcp reservation

2023-11-13 Thread Alexandre Derumier
don't try to add|del ip from ipam here

Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Dhcp.pm | 75 +
 1 file changed, 18 insertions(+), 57 deletions(-)

diff --git a/src/PVE/Network/SDN/Dhcp.pm b/src/PVE/Network/SDN/Dhcp.pm
index e4c4078..1c32fec 100644
--- a/src/PVE/Network/SDN/Dhcp.pm
+++ b/src/PVE/Network/SDN/Dhcp.pm
@@ -21,72 +21,33 @@ PVE::Network::SDN::Dhcp::Dnsmasq->register();
 PVE::Network::SDN::Dhcp::Dnsmasq->init();
 
 sub add_mapping {
-my ($vmid, $vnet, $mac) = @_;
+my ($vmid, $vnetid, $mac, $ip) = @_;
 
-my $vnet_config = PVE::Network::SDN::Vnets::get_vnet($vnet, 1);
-return if !$vnet_config;
+my $subnets = PVE::Network::SDN::Vnets::get_subnets($vnetid, 1);
+my $vnet = PVE::Network::SDN::Vnets::get_vnet($vnetid);
+my $zoneid = $vnet->{zone};
+my $zone = PVE::Network::SDN::Zones::get_zone($zoneid);
 
-my $subnets = PVE::Network::SDN::Vnets::get_subnets($vnet, 1);
+my $dhcptype = $zone->{dhcp};
+return if !$dhcptype;
 
-for my $subnet_id (keys %{$subnets}) {
-   my $subnet_config = $subnets->{$subnet_id};
-
-   next if !$subnet_config->{'dhcp-range'};
-
-   foreach my $dhcp_range (@{$subnet_config->{'dhcp-range'}}) {
-   my $dhcp_config = 
PVE::Network::SDN::Dhcp::get_dhcp($dhcp_range->{server});
-
-   if (!$dhcp_config) {
-   warn "Cannot find configuration for DHCP server 
$dhcp_range->{server}";
-   next;
-   }
-
-   my $ipam_plugin = PVE::Network::SDN::Ipams::Plugin->lookup('pve');
-
-   my $data = {
-   vmid => $vmid,
-   mac => $mac,
-   };
-
-   my $ip = $ipam_plugin->add_dhcp_ip($subnet_config, $dhcp_range, 
$data);
-
-   next if !$ip;
-
-   my $dhcp_plugin = 
PVE::Network::SDN::Dhcp::Plugin->lookup($dhcp_config->{type});
-   $dhcp_plugin->add_ip_mapping($dhcp_config, $mac, $ip);
-
-   return $ip;
-   }
-}
+my $dhcp_plugin = PVE::Network::SDN::Dhcp::Plugin->lookup($dhcptype);
+$dhcp_plugin->add_ip_mapping($zoneid, $mac, $ip);
 }
 
 sub remove_mapping {
-my ($vnet, $mac) = @_;
-
-my $vnet_config = PVE::Network::SDN::Vnets::get_vnet($vnet, 1);
-return if !$vnet_config;
-
-my $subnets = PVE::Network::SDN::Vnets::get_subnets($vnet, 1);
-
-for my $subnet_id (keys %{$subnets}) {
-   my $subnet_config = $subnets->{$subnet_id};
-   next if !$subnet_config->{'dhcp-range'};
+my ($vnetid, $mac) = @_;
 
-   my $ipam_plugin = PVE::Network::SDN::Ipams::Plugin->lookup('pve');
-   $ipam_plugin->del_dhcp_ip($subnet_config, $mac);
+my $subnets = PVE::Network::SDN::Vnets::get_subnets($vnetid, 1);
+my $vnet = PVE::Network::SDN::Vnets::get_vnet($vnetid);
+my $zoneid = $vnet->{zone};
+my $zone = PVE::Network::SDN::Zones::get_zone($zoneid);
 
-   foreach my $dhcp_range (@{$subnet_config->{'dhcp-range'}}) {
-   my $dhcp_config = 
PVE::Network::SDN::Dhcp::get_dhcp($dhcp_range->{server});
+my $dhcptype = $zone->{dhcp};
+return if !$dhcptype;
 
-   if (!$dhcp_config) {
-   warn "Cannot find configuration for DHCP server 
$dhcp_range->{server}";
-   next;
-   }
-
-   my $dhcp_plugin = 
PVE::Network::SDN::Dhcp::Plugin->lookup($dhcp_config->{type});
-   $dhcp_plugin->del_ip_mapping($dhcp_config, $mac);
-   }
-}
+my $dhcp_plugin = PVE::Network::SDN::Dhcp::Plugin->lookup($dhcptype);
+$dhcp_plugin->del_ip_mapping($zoneid, $mac);
 }
 
 sub regenerate_config {
-- 
2.39.2





[pve-devel] [RFC series pve-network/pve-cluster/qemu-server] DHCP

2023-11-13 Thread Alexandre Derumier
Here is my current work, based on Stefan Hanreich's wip2 series.

Changes:

I have removed the extra dhcp.cfg file; the dhcp type can now simply be defined in
the zone.

So we'll have one dhcp server for each zone; these can live in different vrfs, with
the same subnets reused across zones.

/etc/pve/sdn/zones.cfg

simple: simpve
dhcp dnsmasq
ipam pve

simple: netbox
dhcp dnsmasq
ipam netbox

/etc/pve/sdn/vnets.cfg

vnet: vnetpve
zone simpve

vnet: vnetbox
zone netbox

/etc/pve/sdn/subnets.cfg

subnet: simple-172.16.0.0-24
vnet netbox
dhcp-range start-address=172.16.0.10,end-address=172.16.0.20
dnszoneprefix odiso.net
gateway 172.16.0.1

subnet: simpve-192.168.2.0-24
vnet vnetpve
dhcp-range start-address=192.168.2.10,end-address=192.168.2.20
dhcp-range start-address=192.168.2.40,end-address=192.168.2.50
gateway 192.168.2.1

subnet: netbox-172.16.0.0-24
vnet vnetbox
gateway 172.16.0.1
dhcp-range start-address=172.16.0.10,end-address=172.16.0.20

subnet: netbox-2a05:71c0::-120
vnet vnetbox
dhcp-range start-address=2a05:71c0::10,end-address=2a05:71c0::20


I have implemented the netbox plugin to find a new ip in a dhcp range (this doesn't
seem possible with phpipam, but we could define a full range with all ips).
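
Roughly what the netbox plugin does for this (following the code in patch 3/9;
the url, headers and range values come from the ipam/subnet config):

    my $internalid = get_iprange_id($url, $range, $headers);  # netbox id of the ip-range object
    my $result = PVE::Network::SDN::api_request(
        "POST",
        "$url/ipam/ip-ranges/$internalid/available-ips/",
        $headers,
        { dns_name => $data->{hostname}, description => "mac:$data->{mac}" },
    );
    my $ip = $result->{address};   # e.g. "172.16.0.11/24"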

I have split the ipam ip add|del from the dhcp lease reservation.

The ipam ip add|del is done when creating|deleting a vm, or when adding|removing a vm nic.

The dhcp reservation is done at vm start.

The dhcp reservation is deleted at vm destroy.

(This can easily be extended for ephemeral ips.)

At vm start, we look up the ip associated with the mac address.

To avoid calling the ipam each time, I have implemented an extra macs.db file with a
mac-ip hash for fast lookup. This cache is populated when adding an ip in the ipam
(at vm creation, nic add); it can also be populated at vm_start if the mac is not yet
cached (for example, if the ip was pre-reserved manually in an external ipam).
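
A minimal sketch of the fast-path lookup (condensed from get_ips_from_mac in
patches 4/9 and 5/9; the addresses are only examples):

    sub get_ips_from_mac {
        my ($mac, $zoneid, $zone) = @_;

        my $macdb = read_macdb();   # decoded JSON from priv/macs.db
        if (my $entry = $macdb->{macs}->{$mac}) {
            # cache hit, e.g. { ip4 => '192.168.2.10', ip6 => '2a05:71c0::10' }
            return ($entry->{ip4}, $entry->{ip6});
        }
        # cache miss: ask the zone's ipam plugin, store the result with
        # write_macdb(), then return it
        ...
    }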

I have reused/improved my previous ipam code, so ipv6 is supported and the dns plugin
is also used if defined.


I have only implemented calls in qemu-server for now


pve-network:

Alexandre Derumier (9):
  define dhcpplugin in zone
  dhcp : add|del_ip_mapping: only add|del dhcp reservation
  vnet|subnet: add_next_free_ip : implement dhcprange ipam search
  ipam : add macs.db for fast mac lookup
  ipam : add get_ips_from_mac
  vnets: rename del|add|update_cidr to ip
  vnets: add del_ips_from_mac
  ipams : pveplugin: remove del_dhcp_ip
  dhcp : dnsmasq: add_mapping: remove old mac,ip before append

 src/PVE/API2/Network/SDN/Zones.pm  |   1 +
 src/PVE/Network/SDN.pm |   4 +-
 src/PVE/Network/SDN/Dhcp.pm| 166 ++---
 src/PVE/Network/SDN/Dhcp/Dnsmasq.pm|  50 ---
 src/PVE/Network/SDN/Dhcp/Plugin.pm |  28 +---
 src/PVE/Network/SDN/Ipams.pm   |  80 +-
 src/PVE/Network/SDN/Ipams/NetboxPlugin.pm  |  61 
 src/PVE/Network/SDN/Ipams/PVEPlugin.pm |  80 +-
 src/PVE/Network/SDN/Ipams/PhpIpamPlugin.pm |  29 
 src/PVE/Network/SDN/Ipams/Plugin.pm|  13 ++
 src/PVE/Network/SDN/SubnetPlugin.pm|   4 -
 src/PVE/Network/SDN/Subnets.pm |  37 +++--
 src/PVE/Network/SDN/Vnets.pm   |  88 ++-
 src/PVE/Network/SDN/Zones/SimplePlugin.pm  |   7 +-
 src/test/run_test_subnets.pl   |   8 +-
 src/test/run_test_vnets.pl |   4 +-
 16 files changed, 393 insertions(+), 267 deletions(-)

pve-cluster:

Alexandre Derumier (1):
  add priv/macs.db

 src/PVE/Cluster.pm  | 1 +
 src/pmxcfs/status.c | 1 +
 2 files changed, 2 insertions(+)

qemu-server:

Alexandre Derumier (5):
  don't remove dhcp mapping on stop
  vmnic add|remove : add|del ip in ipam
  vm_start : vm-network-scripts: get ip from ipam and add dhcp
reservation
  api2: create|restore|clone: add_free_ip
  vm_destroy: delete ip from ipam && dhcp

 PVE/API2/Qemu.pm  |  6 +++
 PVE/QemuServer.pm | 72 +++
 vm-network-scripts/pve-bridge |  4 +-
 vm-network-scripts/pve-bridgedown | 19 
 4 files changed, 81 insertions(+), 20 deletions(-)

-- 
2.39.2





[pve-devel] [RFC qemu-server 3/5] vm_start : vm-network-scripts: get ip from ipam and add dhcp reservation

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 vm-network-scripts/pve-bridge | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/vm-network-scripts/pve-bridge b/vm-network-scripts/pve-bridge
index c6b3ea8..24efaad 100755
--- a/vm-network-scripts/pve-bridge
+++ b/vm-network-scripts/pve-bridge
@@ -10,6 +10,7 @@ use PVE::Network;
 my $have_sdn;
 eval {
 require PVE::Network::SDN::Zones;
+require PVE::Network::SDN::Vnets;
 require PVE::Network::SDN::Dhcp;
 $have_sdn = 1;
 };
@@ -45,7 +46,8 @@ my $net = PVE::QemuServer::parse_net($netconf);
 die "unable to parse network config '$netid'\n" if !$net;
 
 if ($have_sdn) {
-PVE::Network::SDN::Dhcp::add_mapping($net->{bridge}, $net->{macaddr});
+my ($ip4, $ip6) = PVE::Network::SDN::Vnets::get_ips_from_mac($net->{bridge}, $net->{macaddr});
+PVE::Network::SDN::Dhcp::add_mapping($net->{bridge}, $net->{macaddr}, $ip4, $ip6);
 
 PVE::Network::SDN::Zones::tap_create($iface, $net->{bridge});
 PVE::Network::SDN::Zones::tap_plug($iface, $net->{bridge}, $net->{tag}, 
$net->{firewall}, $net->{trunks}, $net->{rate});
-- 
2.39.2





[pve-devel] [RFC pve-cluster 1/1] add priv/macs.db

2023-11-13 Thread Alexandre Derumier
used to cache the mac-ip address association.

can be used by external ipam, firewall, etc. for fast lookups

Signed-off-by: Alexandre Derumier 
---
 src/PVE/Cluster.pm  | 1 +
 src/pmxcfs/status.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/src/PVE/Cluster.pm b/src/PVE/Cluster.pm
index cfa2583..80c4bc0 100644
--- a/src/PVE/Cluster.pm
+++ b/src/PVE/Cluster.pm
@@ -62,6 +62,7 @@ my $observed = {
 'priv/token.cfg' => 1,
 'priv/acme/plugins.cfg' => 1,
 'priv/ipam.db' => 1,
+'priv/macs.db' => 1,
 '/qemu-server/' => 1,
 '/openvz/' => 1,
 '/lxc/' => 1,
diff --git a/src/pmxcfs/status.c b/src/pmxcfs/status.c
index c8094ac..078602e 100644
--- a/src/pmxcfs/status.c
+++ b/src/pmxcfs/status.c
@@ -89,6 +89,7 @@ static memdb_change_t memdb_change_array[] = {
{ .path = "priv/tfa.cfg" },
{ .path = "priv/token.cfg" },
{ .path = "priv/ipam.db" },
+   { .path = "priv/macs.db" },
{ .path = "datacenter.cfg" },
{ .path = "vzdump.cron" },
{ .path = "vzdump.conf" },
-- 
2.39.2





[pve-devel] [RFC pve-network 8/9] ipams : pveplugin: remove del_dhcp_ip

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Ipams/PVEPlugin.pm | 32 --
 1 file changed, 32 deletions(-)

diff --git a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
index 0bc2b65..776eff8 100644
--- a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
+++ b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
@@ -189,38 +189,6 @@ sub add_range_next_freeip {
 });
 }
 
-sub del_dhcp_ip {
-my ($class, $subnet, $mac) = @_;
-
-my $cidr = $subnet->{cidr};
-my $zone = $subnet->{zone};
-
-my $returned_ip = undef;
-
-cfs_lock_file($ipamdb_file, undef, sub {
-   my $db = read_db();
-
-   die "zone $zone don't exist in ipam db" if !$db->{zones}->{$zone};
-   my $dbzone = $db->{zones}->{$zone};
-
-   die "subnet $cidr don't exist in ipam db" if 
!$dbzone->{subnets}->{$cidr};
-   my $dbsubnet = $dbzone->{subnets}->{$cidr};
-
-   foreach my $ip_address (keys %{$dbsubnet->{ips}}) {
-   my $data = $dbsubnet->{ips}->{$ip_address};
-   next if !$data->{mac} || $data->{mac} ne $mac;
-
-   delete $dbsubnet->{ips}->{$ip_address};
-   write_db($db);
-
-   $returned_ip = $ip_address;
-   }
-});
-die "$@" if $@;
-
-return $returned_ip;
-}
-
 sub del_ip {
 my ($class, $plugin_config, $subnetid, $subnet, $ip) = @_;
 
-- 
2.39.2





[pve-devel] [RFC qemu-server 1/5] don't remove dhcp mapping on stop

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 vm-network-scripts/pve-bridgedown | 19 ---
 1 file changed, 19 deletions(-)

diff --git a/vm-network-scripts/pve-bridgedown b/vm-network-scripts/pve-bridgedown
index a220660..d18d88f 100755
--- a/vm-network-scripts/pve-bridgedown
+++ b/vm-network-scripts/pve-bridgedown
@@ -4,13 +4,6 @@ use strict;
 use warnings;
 use PVE::Network;
 
-my $have_sdn;
-eval {
-require PVE::Network::SDN::Zones;
-require PVE::Network::SDN::Dhcp;
-$have_sdn = 1;
-};
-
 my $iface = shift;
 
 die "no interface specified\n" if !$iface;
@@ -18,18 +11,6 @@ die "no interface specified\n" if !$iface;
 die "got strange interface name '$iface'\n" 
 if $iface !~ m/^tap(\d+)i(\d+)$/;
 
-my $vmid = $1;
-my $netid = "net$2";
-
-my $conf = PVE::QemuConfig->load_config($vmid);
-
-my $netconf = $conf->{$netid};
-my $net = PVE::QemuServer::parse_net($netconf);
-
-if ($have_sdn) {
-PVE::Network::SDN::Dhcp::remove_mapping($net->{bridge}, $net->{macaddr});
-}
-
 PVE::Network::tap_unplug($iface);
 
 exit 0;
-- 
2.39.2





[pve-devel] [RFC pve-network 5/9] ipam : add get_ips_from_mac

2023-11-13 Thread Alexandre Derumier
First look up the ip in the macs.db cache;
if not found, look it up in the ipam and cache the result in macs.db.

Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Dhcp.pm|  8 ++
 src/PVE/Network/SDN/Ipams.pm   | 19 +++--
 src/PVE/Network/SDN/Ipams/NetboxPlugin.pm  | 25 +
 src/PVE/Network/SDN/Ipams/PVEPlugin.pm | 32 ++
 src/PVE/Network/SDN/Ipams/PhpIpamPlugin.pm | 29 
 src/PVE/Network/SDN/Ipams/Plugin.pm|  6 
 src/PVE/Network/SDN/Vnets.pm   | 11 
 7 files changed, 123 insertions(+), 7 deletions(-)

diff --git a/src/PVE/Network/SDN/Dhcp.pm b/src/PVE/Network/SDN/Dhcp.pm
index 1c32fec..b3c2751 100644
--- a/src/PVE/Network/SDN/Dhcp.pm
+++ b/src/PVE/Network/SDN/Dhcp.pm
@@ -6,7 +6,6 @@ use warnings;
 use PVE::Cluster qw(cfs_read_file);
 
 use PVE::Network::SDN;
-use PVE::Network::SDN::Ipams::Plugin;
 use PVE::Network::SDN::SubnetPlugin;
 use PVE::Network::SDN::Dhcp qw(config);
 use PVE::Network::SDN::Subnets qw(sdn_subnets_config config);
@@ -21,9 +20,8 @@ PVE::Network::SDN::Dhcp::Dnsmasq->register();
 PVE::Network::SDN::Dhcp::Dnsmasq->init();
 
 sub add_mapping {
-my ($vmid, $vnetid, $mac, $ip) = @_;
+my ($vnetid, $mac, $ip4, $ip6) = @_;
 
-my $subnets = PVE::Network::SDN::Vnets::get_subnets($vnetid, 1);
 my $vnet = PVE::Network::SDN::Vnets::get_vnet($vnetid);
 my $zoneid = $vnet->{zone};
 my $zone = PVE::Network::SDN::Zones::get_zone($zoneid);
@@ -32,13 +30,13 @@ sub add_mapping {
 return if !$dhcptype;
 
 my $dhcp_plugin = PVE::Network::SDN::Dhcp::Plugin->lookup($dhcptype);
-$dhcp_plugin->add_ip_mapping($zoneid, $mac, $ip);
+$dhcp_plugin->add_ip_mapping($zoneid, $mac, $ip4) if $ip4;
+$dhcp_plugin->add_ip_mapping($zoneid, $mac, $ip6) if $ip6;
 }
 
 sub remove_mapping {
 my ($vnetid, $mac) = @_;
 
-my $subnets = PVE::Network::SDN::Vnets::get_subnets($vnetid, 1);
 my $vnet = PVE::Network::SDN::Vnets::get_vnet($vnetid);
 my $zoneid = $vnet->{zone};
 my $zone = PVE::Network::SDN::Zones::get_zone($zoneid);
diff --git a/src/PVE/Network/SDN/Ipams.pm b/src/PVE/Network/SDN/Ipams.pm
index a459441..926df90 100644
--- a/src/PVE/Network/SDN/Ipams.pm
+++ b/src/PVE/Network/SDN/Ipams.pm
@@ -98,8 +98,8 @@ sub config {
 }
 
 sub get_plugin_config {
-my ($vnet) = @_;
-my $ipamid = $vnet->{ipam};
+my ($zone) = @_;
+my $ipamid = $zone->{ipam};
 my $ipam_cfg = PVE::Network::SDN::Ipams::config();
 return $ipam_cfg->{ids}->{$ipamid};
 }
@@ -124,5 +124,20 @@ sub complete_sdn_vnet {
 return  $cmdname eq 'add' ? [] : [ 
PVE::Network::SDN::Vnets::sdn_ipams_ids($cfg) ];
 }
 
+sub get_ips_from_mac {
+my ($mac, $zoneid, $zone) = @_;
+
+my $macdb = read_macdb();
+return ($macdb->{macs}->{$mac}->{ip4}, $macdb->{macs}->{$mac}->{ip6}) if 
$macdb->{macs}->{$mac};
+
+my $plugin_config = get_plugin_config($zone);
+my $plugin = 
PVE::Network::SDN::Ipams::Plugin->lookup($plugin_config->{type});
+($macdb->{macs}->{$mac}->{ip4}, $macdb->{macs}->{$mac}->{ip6}) = 
$plugin->get_ips_from_mac($plugin_config, $mac, $zoneid);
+
+write_macdb($macdb);
+
+return ($macdb->{macs}->{$mac}->{ip4}, $macdb->{macs}->{$mac}->{ip6});
+}
+
 1;
 
diff --git a/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm b/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm
index 2099a7f..e6cc647 100644
--- a/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm
+++ b/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm
@@ -198,6 +198,31 @@ sub del_ip {
 }
 }
 
+sub get_ips_from_mac {
+my ($class, $plugin_config, $mac, $zoneid) = @_;
+
+my $url = $plugin_config->{url};
+my $token = $plugin_config->{token};
+my $headers = ['Content-Type' => 'application/json; charset=UTF-8', 
'Authorization' => "token $token"];
+
+my $ip4 = undef;
+my $ip6 = undef;
+
+my $data = PVE::Network::SDN::api_request("GET", 
"$url/ipam/ip-addresses/?description__ic=$mac", $headers);
+for my $ip (@{$data->{results}}) {
+   if ($ip->{family}->{value} == 4 && !$ip4) {
+   ($ip4, undef) = split(/\//, $ip->{address});
+   }
+
+   if ($ip->{family}->{value} == 6 && !$ip6) {
+   ($ip6, undef) = split(/\//, $ip->{address});
+   }
+}
+
+return ($ip4, $ip6);
+}
+
+
 sub verify_api {
 my ($class, $plugin_config) = @_;
 
diff --git a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
index 5790715..0bc2b65 100644
--- a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
+++ b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
@@ -242,6 +242,38 @@ sub del_ip {
 die "$@" if $@;
 }
 
+sub get_ips_from_mac {
+my ($class, $plugin_config, $mac, $zoneid) = @_;
+
+#just in case, as this should already be cached in local macs.db
+
+my $ip4 = undef;
+my $ip6 = undef;
+
+my $db = read_db();
+die "zone $zoneid don't exist in ipam db" if !$db->{zones}->{$zoneid};
+my $dbzone = $db->{zones}->{$zoneid};
+my $subnets = $dbzone->{s

[pve-devel] [RFC pve-network 3/9] vnet|subnet: add_next_free_ip : implement dhcprange ipam search

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Ipams/NetboxPlugin.pm | 36 
 src/PVE/Network/SDN/Ipams/PVEPlugin.pm| 12 +++
 src/PVE/Network/SDN/Ipams/Plugin.pm   |  7 
 src/PVE/Network/SDN/Subnets.pm| 21 +---
 src/PVE/Network/SDN/Vnets.pm  | 40 +++
 src/test/run_test_subnets.pl  |  2 +-
 src/test/run_test_vnets.pl|  4 +--
 7 files changed, 88 insertions(+), 34 deletions(-)

diff --git a/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm b/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm
index f0e7168..2099a7f 100644
--- a/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm
+++ b/src/PVE/Network/SDN/Ipams/NetboxPlugin.pm
@@ -151,6 +151,33 @@ sub add_next_freeip {
 return $ip;
 }
 
+sub add_range_next_freeip {
+my ($class, $plugin_config, $subnet, $range, $data, $noerr) = @_;
+
+my $url = $plugin_config->{url};
+my $token = $plugin_config->{token};
+my $headers = ['Content-Type' => 'application/json; charset=UTF-8', 
'Authorization' => "token $token"];
+
+my $internalid = get_iprange_id($url, $range, $headers);
+my $description = "mac:$data->{mac}" if $data->{mac};
+
+my $params = { dns_name => $data->{hostname}, description => $description 
};
+
+my $ip = undef;
+eval {
+   my $result = PVE::Network::SDN::api_request("POST", 
"$url/ipam/ip-ranges/$internalid/available-ips/", $headers, $params);
+   $ip = $result->{address};
+   print "found ip free $ip in range 
$range->{'start-address'}-$range->{'end-address'}\n" if $ip;
+};
+
+if ($@) {
+   die "can't find free ip in range 
$range->{'start-address'}-$range->{'end-address'}: $@" if !$noerr;
+}
+
+return $ip;
+
+}
+
 sub del_ip {
 my ($class, $plugin_config, $subnetid, $subnet, $ip, $noerr) = @_;
 
@@ -204,6 +231,15 @@ sub get_prefix_id {
 return $internalid;
 }
 
+sub get_iprange_id {
+my ($url, $range, $headers) = @_;
+
+my $result = PVE::Network::SDN::api_request("GET", 
"$url/ipam/ip-ranges/?start_address=$range->{'start-address'}&end_address=$range->{'end-address'}",
 $headers);
+my $data = @{$result->{results}}[0];
+my $internalid = $data->{id};
+return $internalid;
+}
+
 sub get_ip_id {
 my ($url, $ip, $headers) = @_;
 my $result = PVE::Network::SDN::api_request("GET", 
"$url/ipam/ip-addresses/?q=$ip", $headers);
diff --git a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
index fcc8282..37b47e4 100644
--- a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
+++ b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
@@ -110,7 +110,7 @@ sub update_ip {
 }
 
 sub add_next_freeip {
-my ($class, $plugin_config, $subnetid, $subnet, $hostname, $mac, 
$description) = @_;
+my ($class, $plugin_config, $subnetid, $subnet, $hostname, $mac, 
$description, $noerr) = @_;
 
 my $cidr = $subnet->{cidr};
 my $network = $subnet->{network};
@@ -156,8 +156,8 @@ sub add_next_freeip {
 return "$freeip/$mask";
 }
 
-sub add_dhcp_ip {
-my ($class, $subnet, $dhcp_range, $data) = @_;
+sub add_range_next_freeip {
+my ($class, $plugin_config, $subnet, $range, $data, $noerr) = @_;
 
 my $cidr = $subnet->{cidr};
 my $zone = $subnet->{zone};
@@ -171,8 +171,8 @@ sub add_dhcp_ip {
my $dbsubnet = $dbzone->{subnets}->{$cidr};
die "subnet '$cidr' doesn't exist in IPAM DB\n" if !$dbsubnet;
 
-   my $ip = new Net::IP ("$dhcp_range->{'start-address'} - 
$dhcp_range->{'end-address'}")
-   or die "Invalid IP address(es) in DHCP Range!\n";
+   my $ip = new Net::IP ("$range->{'start-address'} - 
$range->{'end-address'}")
+   or die "Invalid IP address(es) in Range!\n";
 
do {
my $ip_address = $ip->ip();
@@ -184,7 +184,7 @@ sub add_dhcp_ip {
}
} while (++$ip);
 
-   die "No free IP left in DHCP Range 
$dhcp_range->{'start-address'}:$dhcp_range->{'end-address'}}\n";
+   die "No free IP left in Range 
$range->{'start-address'}:$range->{'end-address'}}\n";
 });
 }
 
diff --git a/src/PVE/Network/SDN/Ipams/Plugin.pm b/src/PVE/Network/SDN/Ipams/Plugin.pm
index c96eeda..4d85b81 100644
--- a/src/PVE/Network/SDN/Ipams/Plugin.pm
+++ b/src/PVE/Network/SDN/Ipams/Plugin.pm
@@ -98,6 +98,13 @@ sub add_next_freeip {
 die "please implement inside plugin";
 }
 
+
+sub add_range_next_freeip {
+my ($class, $plugin_config, $subnet, $range, $data, $noerr) = @_;
+
+die "please implement inside plugin";
+}
+
 sub del_ip {
 my ($class, $plugin_config, $subnetid, $subnet, $ip, $noerr) = @_;
 
diff --git a/src/PVE/Network/SDN/Subnets.pm b/src/PVE/Network/SDN/Subnets.pm
index dd9e697..9f953a6 100644
--- a/src/PVE/Network/SDN/Subnets.pm
+++ b/src/PVE/Network/SDN/Subnets.pm
@@ -202,8 +202,8 @@ sub del_subnet {
 $plugin->del_subnet($plugin_config, $subnetid, $subnet);
 }
 
-sub next_free_ip {
-my ($zone, $subnetid, $subnet, $hostname, $mac, $description, $ski

[pve-devel] [RFC pve-network 1/9] define dhcpplugin in zone

2023-11-13 Thread Alexandre Derumier
simple: zone1
ipam pve
dhcp dnsmasq

simple: zone2
ipam pve
dhcp dnsmasq

This generates one dhcp server per zone/vrf.

dhcp.cfg is not used anymore.
It reuses the node filtering from the zone.

The same subnets in two different zones can't use the
same dhcp server.

Signed-off-by: Alexandre Derumier 
---
 src/PVE/API2/Network/SDN/Zones.pm |  1 +
 src/PVE/Network/SDN.pm|  4 +-
 src/PVE/Network/SDN/Dhcp.pm   | 91 +++
 src/PVE/Network/SDN/Dhcp/Dnsmasq.pm   | 32 
 src/PVE/Network/SDN/Dhcp/Plugin.pm| 28 ++-
 src/PVE/Network/SDN/SubnetPlugin.pm   |  4 -
 src/PVE/Network/SDN/Zones/SimplePlugin.pm |  7 +-
 7 files changed, 54 insertions(+), 113 deletions(-)

diff --git a/src/PVE/API2/Network/SDN/Zones.pm b/src/PVE/API2/Network/SDN/Zones.pm
index 4c8b7e1..1c3356e 100644
--- a/src/PVE/API2/Network/SDN/Zones.pm
+++ b/src/PVE/API2/Network/SDN/Zones.pm
@@ -99,6 +99,7 @@ __PACKAGE__->register_method ({
reversedns => { type => 'string', optional => 1},
dnszone => { type => 'string', optional => 1},
ipam => { type => 'string', optional => 1},
+   dhcp => { type => 'string', optional => 1},
pending => { optional => 1},
state => { type => 'string', optional => 1},
nodes => { type => 'string', optional => 1},
diff --git a/src/PVE/Network/SDN.pm b/src/PVE/Network/SDN.pm
index 5c059bc..c306527 100644
--- a/src/PVE/Network/SDN.pm
+++ b/src/PVE/Network/SDN.pm
@@ -150,15 +150,13 @@ sub commit_config {
 my $zones_cfg = PVE::Network::SDN::Zones::config();
 my $controllers_cfg = PVE::Network::SDN::Controllers::config();
 my $subnets_cfg = PVE::Network::SDN::Subnets::config();
-my $dhcp_cfg = PVE::Network::SDN::Dhcp::config();
 
 my $vnets = { ids => $vnets_cfg->{ids} };
 my $zones = { ids => $zones_cfg->{ids} };
 my $controllers = { ids => $controllers_cfg->{ids} };
 my $subnets = { ids => $subnets_cfg->{ids} };
-my $dhcp = { ids => $dhcp_cfg->{ids} };
 
-$cfg = { version => $version, vnets => $vnets, zones => $zones, 
controllers => $controllers, subnets => $subnets, dhcps => $dhcp };
+$cfg = { version => $version, vnets => $vnets, zones => $zones, 
controllers => $controllers, subnets => $subnets };
 
 cfs_write_file($running_cfg, $cfg);
 }
diff --git a/src/PVE/Network/SDN/Dhcp.pm b/src/PVE/Network/SDN/Dhcp.pm
index b92c73a..e4c4078 100644
--- a/src/PVE/Network/SDN/Dhcp.pm
+++ b/src/PVE/Network/SDN/Dhcp.pm
@@ -20,41 +20,6 @@ PVE::Network::SDN::Dhcp::Plugin->init();
 PVE::Network::SDN::Dhcp::Dnsmasq->register();
 PVE::Network::SDN::Dhcp::Dnsmasq->init();
 
-sub config {
-my ($running) = @_;
-
-if ($running) {
-   my $cfg = PVE::Network::SDN::running_config();
-   return $cfg->{dhcps};
-}
-
-return cfs_read_file('sdn/dhcp.cfg');
-}
-
-sub sdn_dhcps_config {
-my ($cfg, $id, $noerr) = @_;
-
-die "No DHCP ID specified!\n" if !$id;
-
-my $dhcp_config = $cfg->{ids}->{$id};
-die "SDN DHCP '$id' does not exist!\n" if (!$noerr && !$dhcp_config);
-
-if ($dhcp_config) {
-   $dhcp_config->{id} = $id;
-}
-
-return $dhcp_config;
-}
-
-sub get_dhcp {
-my ($dhcp_id, $running) = @_;
-
-return if !$dhcp_id;
-
-my $cfg = PVE::Network::SDN::Dhcp::config($running);
-return PVE::Network::SDN::Dhcp::sdn_dhcps_config($cfg, $dhcp_id, 1);
-}
-
 sub add_mapping {
 my ($vmid, $vnet, $mac) = @_;
 
@@ -127,58 +92,52 @@ sub remove_mapping {
 sub regenerate_config {
 my ($reload) = @_;
 
-my $dhcps = PVE::Network::SDN::Dhcp::config();
-my $subnets = PVE::Network::SDN::Subnets::config();
+my $cfg = PVE::Network::SDN::running_config();
 
-my $plugins = PVE::Network::SDN::Dhcp::Plugin->lookup_types();
+my $zone_cfg = $cfg->{zones};
+my $subnet_cfg = $cfg->{subnets};
+return if !$zone_cfg && !$subnet_cfg;
 
 my $nodename = PVE::INotify::nodename();
 
+my $plugins = PVE::Network::SDN::Dhcp::Plugin->lookup_types();
+
 foreach my $plugin_name (@$plugins) {
my $plugin = PVE::Network::SDN::Dhcp::Plugin->lookup($plugin_name);
-
eval { $plugin->before_regenerate() };
die "Could not run before_regenerate for DHCP plugin $plugin_name $@\n" 
if $@;
 }
 
-foreach my $dhcp_id (keys %{$dhcps->{ids}}) {
-   my $dhcp_config = PVE::Network::SDN::Dhcp::sdn_dhcps_config($dhcps, 
$dhcp_id);
-   my $plugin = 
PVE::Network::SDN::Dhcp::Plugin->lookup($dhcp_config->{type});
+foreach my $zoneid (sort keys %{$zone_cfg->{ids}}) {
+my $zone = $zone_cfg->{ids}->{$zoneid};
+next if defined($zone->{nodes}) && !$zone->{nodes}->{$nodename};
 
-   eval { $plugin->before_configure($dhcp_config) };
-   die "Could not run before_configure for DHCP server $dhcp_id $@\n" if 
$@;
-}
+   m

[pve-devel] [RFC qemu-server 5/5] vm_destroy: delete ip from ipam && dhcp

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 PVE/QemuServer.pm | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 511f644..e4cc80d 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2339,6 +2339,9 @@ sub destroy_vm {
});
 }
 
+eval { delete_ifaces_ipams_ips($conf, $vmid)};
+warn $@ if $@;
+
 if (defined $replacement_conf) {
PVE::QemuConfig->write_config($vmid, $replacement_conf);
 } else {
@@ -8639,4 +8642,20 @@ sub create_ifaces_ipams_ips {
 }
 }
 
+sub delete_ifaces_ipams_ips {
+my ($conf, $vmid) = @_;
+
+return if !$have_sdn;
+
+foreach my $opt (keys %$conf) {
+   if ($opt =~ m/^net(\d+)$/) {
+   my $net = PVE::QemuServer::parse_net($conf->{$opt});
+   eval { PVE::Network::SDN::Dhcp::remove_mapping($net->{bridge}, 
$net->{macaddr}) };
+   warn $@ if $@;
+   eval { PVE::Network::SDN::Vnets::del_ips_from_mac($net->{bridge}, 
$net->{macaddr}, $conf->{name}) };
+   warn $@ if $@;
+   }
+}
+}
+
 1;
-- 
2.39.2





[pve-devel] [RFC qemu-server 4/5] api2: create|restore|clone: add_free_ip

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 PVE/API2/Qemu.pm  |  6 ++
 PVE/QemuServer.pm | 15 +++
 2 files changed, 21 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 38bdaab..a0f8243 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -991,6 +991,8 @@ __PACKAGE__->register_method({
eval { PVE::QemuServer::template_create($vmid, 
$restored_conf) };
warn $@ if $@;
}
+
+   PVE::QemuServer::create_ifaces_ipams_ips($restored_conf, $vmid) 
if $unique;
};
 
# ensure no old replication state are exists
@@ -1066,6 +1068,8 @@ __PACKAGE__->register_method({
}
 
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
+
+   PVE::QemuServer::create_ifaces_ipams_ips($conf, $vmid);
};
 
PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
@@ -3763,6 +3767,8 @@ __PACKAGE__->register_method({
 
PVE::QemuConfig->write_config($newid, $newconf);
 
+   PVE::QemuServer::create_ifaces_ipams_ips($newconf, $vmid);
+
if ($target) {
# always deactivate volumes - avoid lvm LVs to be active on 
several nodes
PVE::Storage::deactivate_volumes($storecfg, $vollist, 
$snapname) if !$running;
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 5c109b1..511f644 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -8624,4 +8624,19 @@ sub del_nets_bridge_fdb {
 }
 }
 
+sub create_ifaces_ipams_ips {
+my ($conf, $vmid) = @_;
+
+return if !$have_sdn;
+
+foreach my $opt (keys %$conf) {
+if ($opt =~ m/^net(\d+)$/) {
+my $value = $conf->{$opt};
+my $net = PVE::QemuServer::parse_net($value);
+eval { 
PVE::Network::SDN::Vnets::add_next_free_cidr($net->{bridge}, $conf->{name}, 
$net->{macaddr}, "vmid: $vmid", undef, 1) };
+warn $@ if $@;
+}
+}
+}
+
 1;
-- 
2.39.2





[pve-devel] [RFC pve-network 4/9] ipam : add macs.db for fast mac lookup

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Ipams.pm   | 61 +-
 src/PVE/Network/SDN/Ipams/PVEPlugin.pm |  4 +-
 src/PVE/Network/SDN/Subnets.pm |  8 +++-
 src/test/run_test_subnets.pl   |  6 +++
 4 files changed, 75 insertions(+), 4 deletions(-)

diff --git a/src/PVE/Network/SDN/Ipams.pm b/src/PVE/Network/SDN/Ipams.pm
index e8a4b0b..a459441 100644
--- a/src/PVE/Network/SDN/Ipams.pm
+++ b/src/PVE/Network/SDN/Ipams.pm
@@ -4,9 +4,10 @@ use strict;
 use warnings;
 
 use JSON;
+use Net::IP;
 
 use PVE::Tools qw(extract_param dir_glob_regex run_command);
-use PVE::Cluster qw(cfs_read_file cfs_write_file cfs_lock_file);
+use PVE::Cluster qw(cfs_register_file cfs_read_file cfs_write_file 
cfs_lock_file);
 use PVE::Network;
 
 use PVE::Network::SDN::Ipams::PVEPlugin;
@@ -19,6 +20,64 @@ PVE::Network::SDN::Ipams::NetboxPlugin->register();
 PVE::Network::SDN::Ipams::PhpIpamPlugin->register();
 PVE::Network::SDN::Ipams::Plugin->init();
 
+my $macdb_filename = 'priv/macs.db';
+
+cfs_register_file($macdb_filename, \&json_reader, \&json_writer);
+
+sub json_reader {
+my ($filename, $data) = @_;
+
+return defined($data) && length($data) > 0 ? decode_json($data) : {};
+}
+
+sub json_writer {
+my ($filename, $data) = @_;
+
+return encode_json($data);
+}
+
+sub read_macdb {
+my () = @_;
+
+return cfs_read_file($macdb_filename);
+}
+
+sub write_macdb {
+my ($data) = @_;
+
+cfs_write_file($macdb_filename, $data);
+}
+
+sub add_cache_mac_ip {
+my ($mac, $ip) = @_;
+
+cfs_lock_file($macdb_filename, undef, sub {
+   my $db = read_macdb();
+   if (Net::IP::ip_is_ipv4($ip)) {
+   $db->{macs}->{$mac}->{ip4} = $ip;
+   } else {
+   $db->{macs}->{$mac}->{ip6} = $ip;
+   }
+   write_macdb($db);
+});
+warn "$@" if $@;
+}
+
+sub del_cache_mac_ip {
+my ($mac, $ip) = @_;
+
+cfs_lock_file($macdb_filename, undef, sub {
+   my $db = read_macdb();
+   if (Net::IP::ip_is_ipv4($ip)) {
+   delete $db->{macs}->{$mac}->{ip4};
+   } else {
+   delete $db->{macs}->{$mac}->{ip6};
+   }
+delete $db->{macs}->{$mac} if !defined($db->{macs}->{$mac}->{ip4}) && 
!defined($db->{macs}->{$mac}->{ip6});
+   write_macdb($db);
+});
+warn "$@" if $@;
+}
 
 sub sdn_ipams_config {
 my ($cfg, $id, $noerr) = @_;
diff --git a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
index 37b47e4..5790715 100644
--- a/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
+++ b/src/PVE/Network/SDN/Ipams/PVEPlugin.pm
@@ -173,13 +173,14 @@ sub add_range_next_freeip {
 
my $ip = new Net::IP ("$range->{'start-address'} - 
$range->{'end-address'}")
or die "Invalid IP address(es) in Range!\n";
+   my $mac = $data->{mac};
 
do {
my $ip_address = $ip->ip();
if (!$dbsubnet->{ips}->{$ip_address}) {
$dbsubnet->{ips}->{$ip_address} = $data;
write_db($db);
-
+   
return $ip_address;
}
} while (++$ip);
@@ -236,7 +237,6 @@ sub del_ip {
 
die "IP '$ip' does not exist in IPAM DB\n" if 
!defined($dbsubnet->{ips}->{$ip});
delete $dbsubnet->{ips}->{$ip};
-
write_db($db);
 });
 die "$@" if $@;
diff --git a/src/PVE/Network/SDN/Subnets.pm b/src/PVE/Network/SDN/Subnets.pm
index 9f953a6..b2125a1 100644
--- a/src/PVE/Network/SDN/Subnets.pm
+++ b/src/PVE/Network/SDN/Subnets.pm
@@ -240,6 +240,9 @@ sub add_next_free_ip {
};
 
die $@ if $@;
+
+   eval { PVE::Network::SDN::Ipams::add_cache_mac_ip($mac, $ip); };
+   warn $@ if $@;
 }
 
 eval {
@@ -364,7 +367,7 @@ sub update_ip {
 }
 
 sub del_ip {
-my ($zone, $subnetid, $subnet, $ip, $hostname, $skipdns) = @_;
+my ($zone, $subnetid, $subnet, $ip, $hostname, $mac, $skipdns) = @_;
 
 return if !$subnet || !$ip;
 
@@ -389,6 +392,9 @@ sub del_ip {
my $plugin_config = $ipam_cfg->{ids}->{$ipamid};
my $plugin = 
PVE::Network::SDN::Ipams::Plugin->lookup($plugin_config->{type});
$plugin->del_ip($plugin_config, $subnetid, $subnet, $ip);
+
+   eval { PVE::Network::SDN::Ipams::del_cache_mac_ip($mac, $ip); };
+   warn $@ if $@;
 }
 
 eval {
diff --git a/src/test/run_test_subnets.pl b/src/test/run_test_subnets.pl
index 9692f4c..c98359a 100755
--- a/src/test/run_test_subnets.pl
+++ b/src/test/run_test_subnets.pl
@@ -109,6 +109,12 @@ foreach my $path (@plugins) {
my $ipam_config = read_sdn_config ("$path/ipam_config");
return $ipam_config;
},
+   add_cache_mac_ip => sub {
+   return;
+   },
+   del_cache_mac_ip => sub {
+   return;
+   }
 );
 
 ## add_subnet
-- 
2.39.2





[pve-devel] [RFC pve-network 6/9] vnets: rename del|add|update_cidr to ip

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Subnets.pm |  4 +---
 src/PVE/Network/SDN/Vnets.pm   | 27 ---
 2 files changed, 13 insertions(+), 18 deletions(-)

diff --git a/src/PVE/Network/SDN/Subnets.pm b/src/PVE/Network/SDN/Subnets.pm
index b2125a1..905ec77 100644
--- a/src/PVE/Network/SDN/Subnets.pm
+++ b/src/PVE/Network/SDN/Subnets.pm
@@ -93,14 +93,12 @@ sub get_subnet {
 }
 
 sub find_ip_subnet {
-my ($ip, $mask, $subnets) = @_;
+my ($ip, $subnets) = @_;
 
 my $subnet = undef;
 my $subnetid = undef;
 
 foreach my $id (sort keys %{$subnets}) {
-
-   next if $mask ne $subnets->{$id}->{mask};
my $cidr = $subnets->{$id}->{cidr};
my $subnet_matcher = subnet_matcher($cidr);
next if !$subnet_matcher->($ip);
diff --git a/src/PVE/Network/SDN/Vnets.pm b/src/PVE/Network/SDN/Vnets.pm
index 9ba1a1e..2f42da6 100644
--- a/src/PVE/Network/SDN/Vnets.pm
+++ b/src/PVE/Network/SDN/Vnets.pm
@@ -80,18 +80,15 @@ sub get_subnets {
 return $subnets;
 }
 
-sub get_subnet_from_vnet_cidr {
-my ($vnetid, $cidr) = @_;
+sub get_subnet_from_vnet_ip {
+my ($vnetid, $ip) = @_;
 
 my $subnets = PVE::Network::SDN::Vnets::get_subnets($vnetid, 1);
 my $vnet = PVE::Network::SDN::Vnets::get_vnet($vnetid);
 my $zoneid = $vnet->{zone};
 my $zone = PVE::Network::SDN::Zones::get_zone($zoneid);
 
-my ($ip, $mask) = split(/\//, $cidr);
-die "ip address is not in cidr format" if !$mask;
-
-my ($subnetid, $subnet) = PVE::Network::SDN::Subnets::find_ip_subnet($ip, 
$mask, $subnets);
+my ($subnetid, $subnet) = PVE::Network::SDN::Subnets::find_ip_subnet($ip, 
$subnets);
 
 return ($zone, $subnetid, $subnet, $ip);
 }
@@ -128,30 +125,30 @@ sub add_next_free_cidr {
 }
 }
 
-sub add_cidr {
-my ($vnetid, $cidr, $hostname, $mac, $description, $skipdns) = @_;
+sub add_ip {
+my ($vnetid, $ip, $hostname, $mac, $description, $skipdns) = @_;
 
 return if !$vnetid;
 
-my ($zone, $subnetid, $subnet, $ip) = 
PVE::Network::SDN::Vnets::get_subnet_from_vnet_cidr($vnetid, $cidr);
+my ($zone, $subnetid, $subnet) = 
PVE::Network::SDN::Vnets::get_subnet_from_vnet_ip($vnetid, $ip);
 PVE::Network::SDN::Subnets::add_ip($zone, $subnetid, $subnet, $ip, 
$hostname, $mac, $description, undef, $skipdns);
 }
 
-sub update_cidr {
-my ($vnetid, $cidr, $hostname, $oldhostname, $mac, $description, $skipdns) 
= @_;
+sub update_ip {
+my ($vnetid, $ip, $hostname, $oldhostname, $mac, $description, $skipdns) = 
@_;
 
 return if !$vnetid;
 
-my ($zone, $subnetid, $subnet, $ip) = 
PVE::Network::SDN::Vnets::get_subnet_from_vnet_cidr($vnetid, $cidr);
+my ($zone, $subnetid, $subnet) = 
PVE::Network::SDN::Vnets::get_subnet_from_vnet_ip($vnetid, $ip);
 PVE::Network::SDN::Subnets::update_ip($zone, $subnetid, $subnet, $ip, 
$hostname, $oldhostname, $mac, $description, $skipdns);
 }
 
-sub del_cidr {
-my ($vnetid, $cidr, $hostname, $skipdns) = @_;
+sub del_ip {
+my ($vnetid, $ip, $hostname, $skipdns) = @_;
 
 return if !$vnetid;
 
-my ($zone, $subnetid, $subnet, $ip) = 
PVE::Network::SDN::Vnets::get_subnet_from_vnet_cidr($vnetid, $cidr);
+my ($zone, $subnetid, $subnet) = 
PVE::Network::SDN::Vnets::get_subnet_from_vnet_ip($vnetid, $ip);
 PVE::Network::SDN::Subnets::del_ip($zone, $subnetid, $subnet, $ip, 
$hostname, $skipdns);
 }
 
-- 
2.39.2





[pve-devel] [RFC qemu-server 2/5] vmnic add|remove : add|del ip in ipam

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 PVE/QemuServer.pm | 38 +++
 vm-network-scripts/pve-bridge |  2 +-
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 31e3919..5c109b1 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -64,6 +64,8 @@ use PVE::QemuServer::USB;
 my $have_sdn;
 eval {
 require PVE::Network::SDN::Zones;
+require PVE::Network::SDN::Vnets;
+require PVE::Network::SDN::Dhcp;
 $have_sdn = 1;
 };
 
@@ -4991,6 +4993,11 @@ sub vmconfig_hotplug_pending {
} elsif ($opt =~ m/^net(\d+)$/) {
die "skip\n" if !$hotplug_features->{network};
vm_deviceunplug($vmid, $conf, $opt);
+   if($have_sdn) {
+   my $net = PVE::QemuServer::parse_net($conf->{$opt});
+   PVE::Network::SDN::Dhcp::remove_mapping($net->{bridge}, 
$net->{macaddr});
+   PVE::Network::SDN::Vnets::del_ips_from_mac($net->{bridge}, 
$net->{macaddr}, $conf->{name});
+   }
} elsif (is_valid_drivename($opt)) {
die "skip\n" if !$hotplug_features->{disk} || $opt =~ 
m/(ide|sata)(\d+)/;
vm_deviceunplug($vmid, $conf, $opt);
@@ -5196,6 +5203,12 @@ sub vmconfig_apply_pending {
die "internal error";
} elsif (defined($conf->{$opt}) && is_valid_drivename($opt)) {
vmconfig_delete_or_detach_drive($vmid, $storecfg, $conf, $opt, 
$force);
+   } elsif (defined($conf->{$opt}) && $opt =~ m/^net\d+$/) {
+   if($have_sdn) {
+   my $net = PVE::QemuServer::parse_net($conf->{$opt});
+   PVE::Network::SDN::Dhcp::remove_mapping($net->{bridge}, 
$net->{macaddr});
+   PVE::Network::SDN::Vnets::del_ips_from_mac($net->{bridge}, 
$net->{macaddr}, $conf->{name});
+   }
}
};
if (my $err = $@) {
@@ -5215,6 +5228,21 @@ sub vmconfig_apply_pending {
eval {
if (defined($conf->{$opt}) && is_valid_drivename($opt)) {
vmconfig_register_unused_drive($storecfg, $vmid, $conf, 
parse_drive($opt, $conf->{$opt}))
+   } elsif (defined($conf->{pending}->{$opt}) && $opt =~ m/^net\d+$/) {
+   if($have_sdn) {
+my $new_net = 
PVE::QemuServer::parse_net($conf->{pending}->{$opt});
+   if ($conf->{$opt}){
+   my $old_net = PVE::QemuServer::parse_net($conf->{$opt});
+   
+   if ($old_net->{bridge} ne $new_net->{bridge} ||
+   $old_net->{macaddr} ne $new_net->{macaddr}) {
+   
PVE::Network::SDN::Dhcp::remove_mapping($old_net->{bridge}, 
$old_net->{macaddr});
+   
PVE::Network::SDN::Vnets::del_ips_from_mac($old_net->{bridge}, 
$old_net->{macaddr}, $conf->{name});
+   }
+  }
+  #fixme: reuse ip if mac change && same bridge 
+  
PVE::Network::SDN::Vnets::add_next_free_cidr($new_net->{bridge}, $conf->{name}, 
$new_net->{macaddr}, "vmid:$vmid", undef, 1);
+   }
}
};
if (my $err = $@) {
@@ -5258,6 +5286,13 @@ sub vmconfig_update_net {
 # for non online change, we try to hot-unplug
die "skip\n" if !$hotplug;
vm_deviceunplug($vmid, $conf, $opt);
+
+   # fixme: force device_unplug on bridge change if mac is present in 
dhcp, to force guest os to retrieve a new ip
+   if($have_sdn) {
+   PVE::Network::SDN::Dhcp::remove_mapping($oldnet->{bridge}, 
$oldnet->{macaddr});
+   PVE::Network::SDN::Vnets::del_ips_from_mac($oldnet->{bridge}, 
$oldnet->{macaddr}, $conf->{name});
+   }
+
} else {
 
die "internal error" if $opt !~ m/net(\d+)/;
@@ -5289,6 +5324,9 @@ sub vmconfig_update_net {
 }
 
 if ($hotplug) {
+   if ($have_sdn) {
+   PVE::Network::SDN::Vnets::add_next_free_cidr($newnet->{bridge}, 
$conf->{name}, $newnet->{macaddr}, "vmid:$vmid", undef, 1);
+   }
vm_deviceplug($storecfg, $conf, $vmid, $opt, $newnet, $arch, 
$machine_type);
 } else {
die "skip\n";
diff --git a/vm-network-scripts/pve-bridge b/vm-network-scripts/pve-bridge
index 5c8acdf..c6b3ea8 100755
--- a/vm-network-scripts/pve-bridge
+++ b/vm-network-scripts/pve-bridge
@@ -45,7 +45,7 @@ my $net = PVE::QemuServer::parse_net($netconf);
 die "unable to parse network config '$netid'\n" if !$net;
 
 if ($have_sdn) {
-PVE::Network::SDN::Dhcp::add_mapping($vmid, $net->{bridge}, 
$net->{macaddr});
+PVE::Network::SDN::Dhcp::add_mapping($net->{bridge}, $net->{macaddr});
 
 PVE::Network::SDN::Zones::tap_create($iface, $net->{bridge});
 PVE::Network::SDN::Zones::tap_plug($iface, $net->{bridge}, $net->{tag}, 
$net->{firewall}, $net->{trunks}, $net->{rat

[pve-devel] [RFC pve-network 9/9] dhcp : dnsmasq: add_mapping: remove old mac, ip before append

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Dhcp/Dnsmasq.pm | 18 +++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/src/PVE/Network/SDN/Dhcp/Dnsmasq.pm b/src/PVE/Network/SDN/Dhcp/Dnsmasq.pm
index 64895ef..21a6ddd 100644
--- a/src/PVE/Network/SDN/Dhcp/Dnsmasq.pm
+++ b/src/PVE/Network/SDN/Dhcp/Dnsmasq.pm
@@ -54,12 +54,24 @@ sub del_ip_mapping {
 
 sub add_ip_mapping {
 my ($class, $dhcpid, $mac, $ip) = @_;
+
 my $ethers_file = "$DNSMASQ_CONFIG_ROOT/$dhcpid/ethers";
+my $ethers_tmp_file = "$ethers_file.tmp";
 
 my $appendFn = sub {
-   open(my $fh, '>>', $ethers_file) or die "Could not open file 
'$ethers_file' $!\n";
-   print $fh "$mac,$ip\n";
-   close $fh;
+   open(my $in, '<', $ethers_file) or die "Could not open file 
'$ethers_file' $!\n";
+   open(my $out, '>', $ethers_tmp_file) or die "Could not open file 
'$ethers_tmp_file' $!\n";
+
+while (my $line = <$in>) {
+   next if $line =~ m/^$mac/;
+   print $out $line;
+   }
+
+   print $out "$mac,$ip\n";
+   close $in;
+   close $out;
+   move $ethers_tmp_file, $ethers_file;
+   chmod 0644, $ethers_file;
 };
 
 PVE::Tools::lock_file($ethers_file, 10, $appendFn);
-- 
2.39.2





[pve-devel] [RFC pve-network 7/9] vnets: add del_ips_from_mac

2023-11-13 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 src/PVE/Network/SDN/Subnets.pm |  4 ++--
 src/PVE/Network/SDN/Vnets.pm   | 12 ++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/src/PVE/Network/SDN/Subnets.pm b/src/PVE/Network/SDN/Subnets.pm
index 905ec77..2bd1ec8 100644
--- a/src/PVE/Network/SDN/Subnets.pm
+++ b/src/PVE/Network/SDN/Subnets.pm
@@ -257,7 +257,7 @@ sub add_next_free_ip {
#rollback
my $err = $@;
eval {
-   PVE::Network::SDN::Subnets::del_ip($zone, $subnetid, $subnet, $ip, 
$hostname)
+   PVE::Network::SDN::Subnets::del_ip($zone, $subnetid, $subnet, $ip, 
$hostname, $mac)
};
die $err;
 }
@@ -311,7 +311,7 @@ sub add_ip {
#rollback
my $err = $@;
eval {
-   PVE::Network::SDN::Subnets::del_ip($zone, $subnetid, $subnet, $ip, 
$hostname)
+   PVE::Network::SDN::Subnets::del_ip($zone, $subnetid, $subnet, $ip, 
$hostname, $mac)
};
die $err;
 }
diff --git a/src/PVE/Network/SDN/Vnets.pm b/src/PVE/Network/SDN/Vnets.pm
index 2f42da6..6047c98 100644
--- a/src/PVE/Network/SDN/Vnets.pm
+++ b/src/PVE/Network/SDN/Vnets.pm
@@ -144,12 +144,12 @@ sub update_ip {
 }
 
 sub del_ip {
-my ($vnetid, $ip, $hostname, $skipdns) = @_;
+my ($vnetid, $ip, $hostname, $mac, $skipdns) = @_;
 
 return if !$vnetid;
 
 my ($zone, $subnetid, $subnet) = 
PVE::Network::SDN::Vnets::get_subnet_from_vnet_ip($vnetid, $ip);
-PVE::Network::SDN::Subnets::del_ip($zone, $subnetid, $subnet, $ip, 
$hostname, $skipdns);
+PVE::Network::SDN::Subnets::del_ip($zone, $subnetid, $subnet, $ip, 
$hostname, $mac, $skipdns);
 }
 
 sub get_ips_from_mac {
@@ -165,4 +165,12 @@ sub get_ips_from_mac {
 return PVE::Network::SDN::Ipams::get_ips_from_mac($mac, $zoneid, $zone);
 }
 
+sub del_ips_from_mac {
+my ($vnetid, $mac, $hostname) = @_;
+
+my ($ip4, $ip6) = PVE::Network::SDN::Vnets::get_ips_from_mac($vnetid, 
$mac);
+PVE::Network::SDN::Vnets::del_ip($vnetid, $ip4, $hostname, $mac) if $ip4;
+PVE::Network::SDN::Vnets::del_ip($vnetid, $ip6, $hostname, $mac) if $ip6;
+}
+
 1;
-- 
2.39.2





Re: [pve-devel] [PATCH qemu-server 1/1] fix #5033: api: Add target-filename option to move_disk reassign

2023-11-13 Thread Thomas Lamprecht
On 13/11/2023 at 10:38, Fabian Grünbichler wrote:
> On November 10, 2023 2:07 pm, Fiona Ebner wrote:
>> I'd rather go with your current approach than a new endpoint. Having
>> said that, another issue is that custom naming is not a first-class
>> feature currently, e.g. live-migration of a local disk or moving a disk
>> to a different storage or restoring from a backup will just throw away
>> the custom name. If we add more explicit support for custom names here,
>> then it's natural that users expect it to "just work" in all scenarios.
>>
>> So I feel like it's a question of whether we want to better support
>> custom names in general.
>>
>> There also is an open feature request to support comment fields for
>> disks (and other hardware items):
>> https://bugzilla.proxmox.com/show_bug.cgi?id=2672 and IMHO that would be
>> a cleaner solution than encoding custom information in the disk name itself.
>>
>> Opinions from other devs?
> 
> there have been similar requests in the past, my preference would be:
> - expose "human readable" custom name more prominently in places where new
>   volumes are created, not just on the CLI/API only storage layer
> - keep the name on operations that copy/move a volume
> 
> if a conflict arises for the second part, we could simply bump the
> number-suffix until we find a free "slot", just like we do now for
> vm-XXX-disk-N. this would require a coordinated change through the
> storage plugins, although it would be possible to fall back to the old
> behaviour ("custom name" is lost) in PVE::Storage for (external) plugins
> that don't support this feature/API (yet).
> 
> for doing a strict match/integration into external tooling, some sort of
> comment or attribute would be better, as that could be easily preserved
> on "move" type operations, and the external tooling would need to ensure
> it is set for "create" type operations, and updated for "copy" type
> operations if that is required (e.g., if it is supposed to be unique).
> 
> one could argue that a comment is all that is needed for human readable
> labelling as well, but there are lots of people that *really* want
> 
> vm-XXX-system-0.raw and vm-XXX-data-0.raw (or 'db', or ..)
> 
> encoded directly in the volume (and thus file/..) name, since that is
> then also visible on the storage layer (in PVE, but also directly when
> doing disaster recovery/..), and I kind of see value in that as well.

I.e., two things

- a comment that is displayed only, has no duplication checks and a
  relatively flexible format (maxLength 64 and /^[\S ]+/), saved as, e.g.,
  URL-encoded. This is relatively easy to do, and someone already tried
  but went IMO way overboard and added comment fields for everything,
  which just does not make sense for most options besides disks and NICs.

- an alias, that is checked to be unique per VMID (via config) and is actually
  inside the volume name. This is a bit harder to get nicely right,
  especially if one wants to do queries on that information.
  Storage moves would need some handling for (non-recommended) sharing
  of storage between clusters, or just left-over volumes. Similar for
  disk movements to other guests, but as the counter would stay it wouldn't
  be that hard.
  My question here is whether such a thing is really worth it, as having that
  encoded is one thing, but the user actually needs to have the foresight
  to set the correct alias so that it actually provides some value when listing
  volumes at the storage layer in the disaster case. And having the config
  around, they could do that mapping too if they used comments.

So, I'd not go for the alias/description/... in the volume name for now,
rather do the per-disk comment first, which is way simpler to maintain
forever and provides not that much less benefit, while being quite a bit
less work.
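
To make the "bump the number-suffix until we find a free slot" part from
the quoted proposal above a bit more concrete, a minimal sketch of such a
lookup (the helper name, the naming pattern and the hash-based "in use"
check are assumptions for illustration only, not existing PVE::Storage
code):

# illustration only: keep a custom volume name and bump the trailing
# number on conflict, analogous to what find_free_diskname() does for
# the plain vm-XXX-disk-N scheme
sub find_free_custom_name {
    my ($existing, $wanted) = @_;    # $existing: hashref of names in use

    return $wanted if !$existing->{$wanted};

    # split e.g. "vm-100-system-0.raw" into "vm-100-system-", 0, ".raw"
    my ($base, $n, $fmt) = $wanted =~ m/^(.+-)(\d+)(\.\w+)?$/
        or die "unexpected volume name '$wanted'\n";
    $fmt //= '';

    for (my $i = $n + 1; $i < 1000; $i++) {
        my $candidate = "${base}${i}${fmt}";
        return $candidate if !$existing->{$candidate};
    }
    die "no free name found for '$wanted'\n";
}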


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH manager 1/2] ui: factor out standalone node check

2023-11-13 Thread Thomas Lamprecht
Am 13/11/2023 um 09:59 schrieb Dominik Csapak:
> into Utils and use it where we manually checked that
> 
> Signed-off-by: Dominik Csapak 
> ---
> I put it into utils since i did not find a better place. Could have put it
> in the ResourceStore, but coupling those things seemed wrong to me.
> 
>  www/manager6/Utils.js | 4 
>  www/manager6/form/ComboBoxSetStoreNode.js | 2 +-
>  www/manager6/grid/Replication.js  | 4 ++--
>  www/manager6/lxc/CmdMenu.js   | 2 +-
>  www/manager6/lxc/Config.js| 2 +-
>  www/manager6/menu/TemplateMenu.js | 2 +-
>  www/manager6/qemu/CmdMenu.js  | 2 +-
>  www/manager6/qemu/Config.js   | 2 +-
>  www/manager6/storage/LVMEdit.js   | 2 +-
>  9 files changed, 13 insertions(+), 9 deletions(-)
> 
>

applied series, thanks!


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH v2 pve-manager 1/2] api: add suspendall endpoint

2023-11-13 Thread Hannes Laimer
Signed-off-by: Hannes Laimer 
---
 PVE/API2/Nodes.pm | 124 ++
 1 file changed, 124 insertions(+)

diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm
index a73fca3f..0956eb0a 100644
--- a/PVE/API2/Nodes.pm
+++ b/PVE/API2/Nodes.pm
@@ -289,6 +289,7 @@ __PACKAGE__->register_method ({
{ name => 'stopall' },
{ name => 'storage' },
{ name => 'subscription' },
+   { name => 'suspendall' },
{ name => 'syslog' },
{ name => 'tasks' },
{ name => 'termproxy' },
@@ -2011,6 +2012,129 @@ __PACKAGE__->register_method ({
return $rpcenv->fork_worker('stopall', undef, $authuser, $code);
 }});
 
+my $create_suspend_worker = sub {
+my ($nodename, $vmid) = @_;
+return if !PVE::QemuServer::check_running($vmid, 1);
+print STDERR "Suspending VM $vmid\n";
+return PVE::API2::Qemu->vm_suspend(
+   { node => $nodename, vmid => $vmid, todisk => 1 }
+);
+};
+
+__PACKAGE__->register_method ({
+name => 'suspendall',
+path => 'suspendall',
+method => 'POST',
+protected => 1,
+permissions => {
+   description => "The 'VM.PowerMgmt' permission is required on '/' or on 
'/vms/' for each"
+   ." ID passed via the 'vms' parameter. Additionally, you need 
'VM.Config.Disk' on the"
+   ." '/vms/{vmid}' path and 'Datastore.AllocateSpace' for the 
configured state-storage(s)",
+   user => 'all',
+},
+proxyto => 'node',
+description => "Suspend all VMs.",
+parameters => {
+   additionalProperties => 0,
+   properties => {
+   node => get_standard_option('pve-node'),
+   vms => {
+   description => "Only consider Guests with these IDs.",
+   type => 'string',  format => 'pve-vmid-list',
+   optional => 1,
+   },
+   },
+},
+returns => {
+   type => 'string',
+},
+code => sub {
+   my ($param) = @_;
+
+   my $rpcenv = PVE::RPCEnvironment::get();
+   my $authuser = $rpcenv->get_user();
+
+   # we cannot really check access to the state-storage here, that's 
happening per worker.
+   if (!$rpcenv->check($authuser, "/", [ 'VM.PowerMgmt', 'VM.Config.Disk' 
], 1)) {
+   my @vms = PVE::Tools::split_list($param->{vms});
+   if (scalar(@vms) > 0) {
+   $rpcenv->check($authuser, "/vms/$_", [ 'VM.PowerMgmt' ]) for 
@vms;
+   } else {
+   raise_perm_exc("/, VM.PowerMgmt  && VM.Config.Disk");
+   }
+   }
+
+   my $nodename = $param->{node};
+   $nodename = PVE::INotify::nodename() if $nodename eq 'localhost';
+
+   my $code = sub {
+
+   $rpcenv->{type} = 'priv'; # to start tasks in background
+
+   my $stopList = $get_start_stop_list->($nodename, undef, 
$param->{vms});
+
+   my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
+   my $datacenterconfig = cfs_read_file('datacenter.cfg');
+   # if not set by user spawn max cpu count number of workers
+   my $maxWorkers =  $datacenterconfig->{max_workers} || 
$cpuinfo->{cpus};
+
+   for my $order (sort {$b <=> $a} keys %$stopList) {
+   my $vmlist = $stopList->{$order};
+   my $workers = {};
+
+   my $finish_worker = sub {
+   my $pid = shift;
+   my $worker = delete $workers->{$pid} || return;
+
+   syslog('info', "end task $worker->{upid}");
+   };
+
+   for my $vmid (sort {$b <=> $a} keys %$vmlist) {
+   my $d = $vmlist->{$vmid};
+   if ($d->{type} eq 'lxc') {
+   print STDERR "Skipping $vmid, only VMs can be 
suspended\n";
+   next;
+   }
+   my $upid = eval {
+   $create_suspend_worker->($nodename, $vmid)
+   };
+   warn $@ if $@;
+   next if !$upid;
+
+   my $task = PVE::Tools::upid_decode($upid, 1);
+   next if !$task;
+
+   my $pid = $task->{pid};
+
+   $workers->{$pid} = { type => $d->{type}, upid => $upid, 
vmid => $vmid };
+   while (scalar(keys %$workers) >= $maxWorkers) {
+   foreach my $p (keys %$workers) {
+   if (!PVE::ProcFSTools::check_process_running($p)) {
+   $finish_worker->($p);
+   }
+   }
+   sleep(1);
+   }
+   }
+   while (scalar(keys %$workers)) {
+   for my $p (keys %$workers) {
+   if (!PVE::ProcFSTools::check_process_running($p)) {
+   $finish_worker->($p);
+   }
+   }
+   sleep(1);
+   }
+   

[pve-devel] [PATCH v2 pve-manager 0/2] add bulk suspend

2023-11-13 Thread Hannes Laimer
Adds support for bulk suspending VMs as it already exists for stop.

v2, thanks @Thomas:
* api: skip CTs + fix permission checks
* ui: disable `suspendall` button if user isn't allowed to use it
* ui: use new bulk action filtering

Hannes Laimer (2):
  api: add suspendall endpoint
  ui: add bulk suspend support

 PVE/API2/Nodes.pm | 124 ++
 www/manager6/Utils.js |   1 +
 www/manager6/node/CmdMenu.js  |  15 
 www/manager6/node/Config.js   |  14 
 www/manager6/window/BulkAction.js |   5 +-
 5 files changed, 157 insertions(+), 2 deletions(-)

-- 
2.39.2



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH v2 pve-manager 2/2] ui: add bulk suspend support

2023-11-13 Thread Hannes Laimer
Signed-off-by: Hannes Laimer 
---
 www/manager6/Utils.js |  1 +
 www/manager6/node/CmdMenu.js  | 15 +++
 www/manager6/node/Config.js   | 14 ++
 www/manager6/window/BulkAction.js |  5 +++--
 4 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js
index be30393e..139064d1 100644
--- a/www/manager6/Utils.js
+++ b/www/manager6/Utils.js
@@ -1996,6 +1996,7 @@ Ext.define('PVE.Utils', {
spiceshell: ['', gettext('Shell') + ' (Spice)'],
startall: ['', gettext('Bulk start VMs and Containers')],
stopall: ['', gettext('Bulk shutdown VMs and Containers')],
+   suspendall: ['', gettext('Suspend all VMs')],
unknownimgdel: ['', gettext('Destroy image from unknown guest')],
wipedisk: ['Device', gettext('Wipe Disk')],
vncproxy: ['VM/CT', gettext('Console')],
diff --git a/www/manager6/node/CmdMenu.js b/www/manager6/node/CmdMenu.js
index dc56ef08..956adc49 100644
--- a/www/manager6/node/CmdMenu.js
+++ b/www/manager6/node/CmdMenu.js
@@ -56,6 +56,20 @@ Ext.define('PVE.node.CmdMenu', {
});
},
},
+   {
+   text: gettext('Bulk Suspend'),
+   itemId: 'bulksuspend',
+   iconCls: 'fa fa-fw fa-download',
+   handler: function() {
+   Ext.create('PVE.window.BulkAction', {
+   nodename: this.up('menu').nodename,
+   title: gettext('Bulk Suspend'),
+   btnText: gettext('Suspend'),
+   action: 'suspendall',
+   autoShow: true,
+   });
+   },
+   },
{
text: gettext('Bulk Migrate'),
itemId: 'bulkmigrate',
@@ -129,6 +143,7 @@ Ext.define('PVE.node.CmdMenu', {
if (!caps.vms['VM.PowerMgmt']) {
me.getComponent('bulkstart').setDisabled(true);
me.getComponent('bulkstop').setDisabled(true);
+   me.getComponent('bulksuspend').setDisabled(true);
}
if (!caps.nodes['Sys.PowerMgmt']) {
me.getComponent('wakeonlan').setDisabled(true);
diff --git a/www/manager6/node/Config.js b/www/manager6/node/Config.js
index 6ed2172a..ac5e6b25 100644
--- a/www/manager6/node/Config.js
+++ b/www/manager6/node/Config.js
@@ -65,6 +65,20 @@ Ext.define('PVE.node.Config', {
});
},
},
+   {
+   text: gettext('Bulk Suspend'),
+   iconCls: 'fa fa-fw fa-download',
+   disabled: !caps.vms['VM.PowerMgmt'],
+   handler: function() {
+   Ext.create('PVE.window.BulkAction', {
+   autoShow: true,
+   nodename: nodename,
+   title: gettext('Bulk Suspend'),
+   btnText: gettext('Suspend'),
+   action: 'suspendall',
+   });
+   },
+   },
{
text: gettext('Bulk Migrate'),
iconCls: 'fa fa-fw fa-send-o',
diff --git a/www/manager6/window/BulkAction.js 
b/www/manager6/window/BulkAction.js
index 5f76ef7a..c8132753 100644
--- a/www/manager6/window/BulkAction.js
+++ b/www/manager6/window/BulkAction.js
@@ -10,7 +10,7 @@ Ext.define('PVE.window.BulkAction', {
 },
 border: false,
 
-// the action to set, currently there are: `startall`, `migrateall`, 
`stopall`
+// the action to set, currently there are: `startall`, `migrateall`, 
`stopall`, `suspendall`
 action: undefined,
 
 submit: function(params) {
@@ -144,6 +144,7 @@ Ext.define('PVE.window.BulkAction', {
};
 
let defaultStatus = me.action === 'migrateall' ? '' : me.action === 
'startall' ? 'stopped' : 'running';
+   let defaultType = me.action === 'suspendall' ? 'qemu' : '';
 
let statusMap = [];
let poolMap = [];
@@ -318,7 +319,7 @@ Ext.define('PVE.window.BulkAction', {
fieldLabel: gettext("Type"),
emptyText: gettext('All'),
editable: false,
-   value: '',
+   value: defaultType,
store: [
['', gettext('All')],
['lxc', gettext('CT')],
-- 
2.39.2



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH v4 container 1/1] Add device passthrough

2023-11-13 Thread Filip Schauer
Add a dev[n] argument to the container config to pass devices through to
a container. A device can be passed by its path. Additionally the access
mode, uid and gid can be specified through their respective properties.
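
For illustration, a passthrough entry in the container config could then
look like this (the device path and the IDs here are only an example):

    dev0: /dev/ttyUSB0,mode=0660,uid=100,gid=100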

Signed-off-by: Filip Schauer 
---
 src/PVE/LXC.pm| 57 +++-
 src/PVE/LXC/Config.pm | 93 +++
 src/PVE/LXC/Tools.pm  | 23 +++---
 src/lxc-pve-autodev-hook  | 18 +++-
 src/lxc-pve-prestart-hook | 60 -
 5 files changed, 242 insertions(+), 9 deletions(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 7ec816b..242b2a0 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -5,7 +5,7 @@ use warnings;
 
 use Cwd qw();
 use Errno qw(ELOOP ENOTDIR EROFS ECONNREFUSED EEXIST);
-use Fcntl qw(O_RDONLY O_WRONLY O_NOFOLLOW O_DIRECTORY);
+use Fcntl qw(O_RDONLY O_WRONLY O_NOFOLLOW O_DIRECTORY :mode);
 use File::Path;
 use File::Spec;
 use IO::Poll qw(POLLIN POLLHUP);
@@ -639,6 +639,31 @@ sub update_lxc_config {
$raw .= "lxc.mount.auto = sys:mixed\n";
 }
 
+PVE::LXC::Config->foreach_passthrough_device($conf, sub {
+   my ($key, $device) = @_;
+
+   die "Path is not defined for passthrough device $key"
+   unless (defined($device->{path}));
+
+   my $absolute_path = $device->{path};
+
+   die "Device $absolute_path does not exist\n"
+   unless (-e $absolute_path);
+
+   die "$absolute_path is not a device\n"
+   unless (-b $absolute_path || -c $absolute_path);
+
+   my ($mode, $rdev) = (stat($absolute_path))[2, 6];
+
+   die "Could not get mode or device ID of $absolute_path\n"
+   if (!defined($mode) || !defined($rdev));
+
+   my $major = PVE::Tools::dev_t_major($rdev);
+   my $minor = PVE::Tools::dev_t_minor($rdev);
+   my $device_type_char = S_ISBLK($mode) ? 'b' : 'c';
+   $raw .= "lxc.cgroup2.devices.allow = $device_type_char $major:$minor 
rw\n";
+});
+
 # WARNING: DO NOT REMOVE this without making sure that loop device nodes
 # cannot be exposed to the container with r/w access (cgroup perms).
 # When this is enabled mounts will still remain in the monitor's namespace
@@ -1318,6 +1343,8 @@ sub check_ct_modify_config_perm {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, 
['VM.Config.Network']);
check_bridge_access($rpcenv, $authuser, $oldconf->{$opt}) if 
$oldconf->{$opt};
check_bridge_access($rpcenv, $authuser, $newconf->{$opt}) if 
$newconf->{$opt};
+   } elsif ($opt =~ m/^dev\d+$/) {
+   raise_perm_exc("configuring device passthrough is only allowed for 
root\@pam");
} elsif ($opt eq 'nameserver' || $opt eq 'searchdomain' || $opt eq 
'hostname') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, 
['VM.Config.Network']);
} elsif ($opt eq 'features') {
@@ -2367,6 +2394,34 @@ sub validate_id_maps {
 }
 }
 
+sub map_ct_id_to_host {
+my ($id, $id_map, $id_type) = @_;
+
+foreach my $mapping (@$id_map) {
+   my ($type, $ct, $host, $length) = @$mapping;
+
+   next if ($type ne $id_type);
+
+   if ($id >= int($ct) && $id < ($ct + $length)) {
+   return $host - $ct + $id;
+   }
+}
+
+return $id;
+}
+
+sub map_ct_uid_to_host {
+my ($uid, $id_map) = @_;
+
+return map_ct_id_to_host($uid, $id_map, 'u');
+}
+
+sub map_ct_gid_to_host {
+my ($gid, $id_map) = @_;
+
+return map_ct_id_to_host($gid, $id_map, 'g');
+}
+
 sub userns_command {
 my ($id_map) = @_;
 if (@$id_map) {
diff --git a/src/PVE/LXC/Config.pm b/src/PVE/LXC/Config.pm
index 56e1f10..9f325f2 100644
--- a/src/PVE/LXC/Config.pm
+++ b/src/PVE/LXC/Config.pm
@@ -29,6 +29,7 @@ mkdir $lockdir;
 mkdir "/etc/pve/nodes/$nodename/lxc";
 my $MAX_MOUNT_POINTS = 256;
 my $MAX_UNUSED_DISKS = $MAX_MOUNT_POINTS;
+my $MAX_DEVICES = 256;
 
 # BEGIN implemented abstract methods from PVE::AbstractConfig
 
@@ -908,6 +909,71 @@ for (my $i = 0; $i < $MAX_UNUSED_DISKS; $i++) {
 }
 }
 
+PVE::JSONSchema::register_format('pve-lxc-dev-string', 
\&verify_lxc_dev_string);
+sub verify_lxc_dev_string {
+my ($dev, $noerr) = @_;
+
+if (
+   $dev =~ m@/\.\.?/@ ||
+   $dev =~ m@/\.\.?$@ ||
+   $dev !~ m!^/dev/!
+) {
+   return undef if $noerr;
+   die "$dev is not a valid device path\n";
+}
+
+return $dev;
+}
+
+PVE::JSONSchema::register_format('file-access-mode-string', 
\&verify_file_access_mode);
+sub verify_file_access_mode {
+my ($mode, $noerr) = @_;
+
+if ($mode !~ /^[0-7]*$/) {
+   return undef if $noerr;
+   die "$mode is not a valid file access mode\n";
+}
+
+return $mode;
+}
+
+my $dev_desc = {
+path => {
+   optional => 1,
+   type => 'string',
+   default_key => 1,
+   format => 'pve-lxc-dev-string',
+   format_description => 'Path',
+   description => 'Device to pass through to the container',
+   verbose_description => 'P

[pve-devel] [PATCH v4 common 1/2] tools: Add mknod syscall

2023-11-13 Thread Filip Schauer
Signed-off-by: Filip Schauer 
---
 src/PVE/Syscall.pm | 1 +
 src/PVE/Tools.pm   | 5 +
 2 files changed, 6 insertions(+)

diff --git a/src/PVE/Syscall.pm b/src/PVE/Syscall.pm
index 4c0b9cf..2a423e8 100644
--- a/src/PVE/Syscall.pm
+++ b/src/PVE/Syscall.pm
@@ -16,6 +16,7 @@ BEGIN {
openat => &SYS_openat,
close => &SYS_close,
mkdirat => &SYS_mkdirat,
+   mknod => &SYS_mknod,
faccessat => &SYS_faccessat,
setresuid => &SYS_setresuid,
fchownat => &SYS_fchownat,
diff --git a/src/PVE/Tools.pm b/src/PVE/Tools.pm
index c91e933..fbb6773 100644
--- a/src/PVE/Tools.pm
+++ b/src/PVE/Tools.pm
@@ -1720,6 +1720,11 @@ sub mkdirat($$$) {
 return syscall(PVE::Syscall::mkdirat, int($dirfd), $name, int($mode)) == 0;
 }
 
+sub mknod($$$) {
+my ($filename, $mode, $dev) = @_;
+return syscall(PVE::Syscall::SYS_mknod, $filename, int($mode), int($dev)) 
== 0;
+}
+
 sub fchownat($) {
 my ($dirfd, $pathname, $owner, $group, $flags) = @_;
 return syscall(
-- 
2.39.2



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH v4 many] Add container device passthrough

2023-11-13 Thread Filip Schauer
Changes since v1:
* mknod the devices in /var/lib/lxc/$vmid/passthrough and setup proper
  permissions instead of bind mounting the devices from /dev directly
* Add support for USB mapping
* Add foreach_passthrough_device helper function

Changes since v2:
* Remove support for USB mapping
* Add mknod syscall
* Setup a tmpfs to mknod the devices to instead of writing to the host
  file system
* Write passthrough dev list to /var/lib/lxc/$vmid/passthrough_devices
* Move sub foreach_passthrough_device out of the AbstractConfig into the
  container config
* Add mode, uid and gid properties to passthrough device config

Changes since v3:
* Allow only root user to configure device passthrough
* Verify format of device passthrough mode
* Add mount flag constants
* Remove redundant error checks
* Add missing error checks
* Optimize file creation by using mknod instead of touch
* Correctly handle id mapping with multiple ranges

pve-common:

Filip Schauer (2):
  tools: Add mknod syscall
  tools: Add mount flag constants

 src/PVE/Syscall.pm |  1 +
 src/PVE/Tools.pm   | 36 
 2 files changed, 37 insertions(+)

pve-container:

Filip Schauer (1):
  Add device passthrough

 src/PVE/LXC.pm| 57 +++-
 src/PVE/LXC/Config.pm | 93 +++
 src/PVE/LXC/Tools.pm  | 23 +++---
 src/lxc-pve-autodev-hook  | 18 +++-
 src/lxc-pve-prestart-hook | 60 -
 5 files changed, 242 insertions(+), 9 deletions(-)


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH v4 common 2/2] tools: Add mount flag constants

2023-11-13 Thread Filip Schauer
Signed-off-by: Filip Schauer 
---
 src/PVE/Tools.pm | 31 +++
 1 file changed, 31 insertions(+)

diff --git a/src/PVE/Tools.pm b/src/PVE/Tools.pm
index fbb6773..b3af2c6 100644
--- a/src/PVE/Tools.pm
+++ b/src/PVE/Tools.pm
@@ -62,6 +62,20 @@ CLONE_NEWIPC
 CLONE_NEWUSER
 CLONE_NEWPID
 CLONE_NEWNET
+MS_RDONLY
+MS_NOSUID
+MS_NODEV
+MS_NOEXEC
+MS_SYNCHRONOUS
+MS_REMOUNT
+MS_MANDLOCK
+MS_DIRSYNC
+MS_NOSYMFOLLOW
+MS_NOATIME
+MS_NODIRATIME
+MS_BIND
+MS_MOVE
+MS_REC
 );
 
 my $pvelogdir = "/var/log/pve";
@@ -110,6 +124,23 @@ use constant {RENAME_NOREPLACE => (1 << 0),
   RENAME_EXCHANGE  => (1 << 1),
   RENAME_WHITEOUT  => (1 << 2)};
 
+use constant {
+MS_RDONLY  => (1),
+MS_NOSUID  => (1 <<  1),
+MS_NODEV   => (1 <<  2),
+MS_NOEXEC  => (1 <<  3),
+MS_SYNCHRONOUS => (1 <<  4),
+MS_REMOUNT => (1 <<  5),
+MS_MANDLOCK=> (1 <<  6),
+MS_DIRSYNC => (1 <<  7),
+MS_NOSYMFOLLOW => (1 <<  8),
+MS_NOATIME => (1 << 10),
+MS_NODIRATIME  => (1 << 11),
+MS_BIND=> (1 << 12),
+MS_MOVE=> (1 << 13),
+MS_REC => (1 << 14),
+};
+
 sub run_with_timeout {
 my ($timeout, $code, @param) = @_;
 
-- 
2.39.2



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH v3 container 1/1] Add device passthrough

2023-11-13 Thread Filip Schauer

Patch v4 available:

https://lists.proxmox.com/pipermail/pve-devel/2023-November/060022.html

On 10/11/2023 11:44, Wolfgang Bumiller wrote:

On Tue, Nov 07, 2023 at 02:46:42PM +0100, Filip Schauer wrote:

Add a dev[n] argument to the container config to pass devices through to
a container. A device can be passed by its path. Additionally the access
mode, uid and gid can be specified through their respective properties.

Signed-off-by: Filip Schauer 
---
  src/PVE/LXC.pm| 29 +-
  src/PVE/LXC/Config.pm | 79 +++
  src/PVE/LXC/Tools.pm  | 23 +---
  src/lxc-pve-autodev-hook  | 14 ++-
  src/lxc-pve-prestart-hook | 63 +++
  5 files changed, 201 insertions(+), 7 deletions(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 7ec816b..162f093 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -5,7 +5,7 @@ use warnings;
  
  use Cwd qw();

  use Errno qw(ELOOP ENOTDIR EROFS ECONNREFUSED EEXIST);
-use Fcntl qw(O_RDONLY O_WRONLY O_NOFOLLOW O_DIRECTORY);
+use Fcntl qw(O_RDONLY O_WRONLY O_NOFOLLOW O_DIRECTORY :mode);
  use File::Path;
  use File::Spec;
  use IO::Poll qw(POLLIN POLLHUP);
@@ -639,6 +639,33 @@ sub update_lxc_config {
$raw .= "lxc.mount.auto = sys:mixed\n";
  }
  
+PVE::LXC::Config->foreach_passthrough_device($conf, sub {

+   my ($key, $device) = @_;
+
+   die "Path is not defined for passthrough device $key"
+   unless (defined($device->{path}));
+
+   my $absolute_path = $device->{path};
+
+   die "Device $absolute_path does not exist\n"
+   unless (-e $absolute_path);
+
+   my ($mode, $rdev) = (stat($absolute_path))[2, 6];
+
+   die "Could not get mode of device $absolute_path\n"
+   if $mode !~ /^([\d]+)$/;
+   $mode = $1;
+
+   die "Could not get device ID of $absolute_path\n"
+   if $rdev !~ /^([\d]+)$/;
+   $rdev = $1;

^ The above 2 checks don't seem necessary. `stat()` cannot return
different types of values there.
Just check them for definedness once together.


+
+   my $major = PVE::Tools::dev_t_major($rdev);
+   my $minor = PVE::Tools::dev_t_minor($rdev);
+   my $device_type_char = S_ISBLK($mode) ? 'b' : 'c';
+   $raw .= "lxc.cgroup2.devices.allow = $device_type_char $major:$minor 
rw\n";
+});
+
  # WARNING: DO NOT REMOVE this without making sure that loop device nodes
  # cannot be exposed to the container with r/w access (cgroup perms).
  # When this is enabled mounts will still remain in the monitor's namespace
diff --git a/src/PVE/LXC/Config.pm b/src/PVE/LXC/Config.pm
index 56e1f10..b4fa147 100644
--- a/src/PVE/LXC/Config.pm
+++ b/src/PVE/LXC/Config.pm
@@ -29,6 +29,7 @@ mkdir $lockdir;
  mkdir "/etc/pve/nodes/$nodename/lxc";
  my $MAX_MOUNT_POINTS = 256;
  my $MAX_UNUSED_DISKS = $MAX_MOUNT_POINTS;
+my $MAX_DEVICES = 256;
  
  # BEGIN implemented abstract methods from PVE::AbstractConfig
  
@@ -908,6 +909,57 @@ for (my $i = 0; $i < $MAX_UNUSED_DISKS; $i++) {

  }
  }
  
+PVE::JSONSchema::register_format('pve-lxc-dev-string', \&verify_lxc_dev_string);

+sub verify_lxc_dev_string {
+my ($dev, $noerr) = @_;
+
+if (
+   $dev =~ m@/\.\.?/@ ||
+   $dev =~ m@/\.\.?$@ ||
+   $dev !~ m!^/dev/!
+) {
+   return undef if $noerr;
+   die "$dev is not a valid device path\n";
+}
+
+return $dev;
+}
+
+my $dev_desc = {
+path => {
+   optional => 1,
+   type => 'string',
+   default_key => 1,
+   format => 'pve-lxc-dev-string',
+   format_description => 'Path',
+   description => 'Device to pass through to the container',
+   verbose_description => 'Path to the device to pass through to the 
container',
+},
+mode => {
+   optional => 1,
+   type => 'integer',
+   description => 'Access mode (in octal) to be set on the device node',
+},
+uid => {
+   optional => 1,
+   type => 'integer',
+   description => 'User ID to be assigned to the device node',
+},
+gid => {
+   optional => 1,
+   type => 'integer',
+   description => 'Group ID to be assigned to the device node',
+},
+};
+
+for (my $i = 0; $i < $MAX_DEVICES; $i++) {
+$confdesc->{"dev$i"} = {
+   optional => 1,
+   type => 'string', format => $dev_desc,
+   description => "Device to pass through to the container",
+}
+}
+
  sub parse_pct_config {
  my ($filename, $raw, $strict) = @_;
  
@@ -1255,6 +1307,21 @@ sub parse_volume {

  return;
  }
  
+sub parse_device {

+my ($class, $device_string, $noerr) = @_;
+
+my $res;
+eval { $res = PVE::JSONSchema::parse_property_string($dev_desc, 
$device_string) };
+if ($@) {
+   return undef if $noerr;
+   die $@;
+}
+
+die "Path has to be defined" unless (defined($res->{path}));
+
+return $res;
+}
+
  sub print_volume {
  my ($class, $key, $volume) = @_;
  
@@ -1762,4 +1829,16 @@ sub

Re: [pve-devel] [RFC series pve-network/pve-cluster/qemu-server] DHCP

2023-11-13 Thread Stefan Hanreich


On 11/13/23 11:04, Alexandre Derumier wrote:
> I have splitted the ipam add|del , from the dhcp lease reservation.
> 
> The ipam add|del ip is done when creating|deleting vm, or add|del a vm nic
> 
> The dhcp reservation is done at vm start.
> 
> The delete of dhcp reservation is done at vm destroy.
> 
> (This can be easily extend for ephemeral ip)
> 
> At vm start, we search ip associated with mac address.

Thanks very much, that looks like a very good solution! From what I can
tell migration would also work with this?

> I have only implemented calls in qemu-server for now

Do you plan on checking out pve-container as well? Or should I look into
this?



I integrated it into my branch and will be giving it a test drive now.

I've been working on a UI integration in the meantime and it's getting
along quite well, although it will need to cook for another day or two.



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH qemu-server 1/1] fix #5033: api: Add target-filename option to move_disk reassign

2023-11-13 Thread Markus Ebner via pve-devel
--- Begin Message ---
> But I do have another suggestion too: Should we rather automatically
> preserve the current volume name (just replacing the VM ID) if there is
> no other volume with that name and choose a new name if there is? For
> offline storage migration, we also do it like that (sans replacing the
> VM ID). Then we could get away without needing a new option and users
> specifying it. Would that be enough to cover your use case?

Our VM disks are tagged with UUIDs so we can reliably match them to the
hardware configuration in our database and don't delete any disks just
because they were in the wrong slot (e.g. because someone manually
fiddled with the config).
We also have VM templates prepared with Cloud-Init installations that
use the default disk naming scheme. Now if we want to reinstall a VM,
we delete the VM's root disk volume, clone the template and move the clone's
disk volume to the target VM. At that step, we want to rename the disk from
the template's default name to one containing the correct UUID that's
already specified for the VM's root disk in our database.
So unfortunately, I don't think that would solve our use-case.

> Exposing the rename functionality as part of the storage API instead
> might fit better with the status quo?

Yesterday in: https://bugzilla.proxmox.com/show_bug.cgi?id=5038[1] you
wrote that the storage layer has no access to the guest layer.
So if renaming is instead implemented here, renaming an attached disk
would break the vm similar to how deleting a volume breaks it, right?

Our use-case would then probably have to look something like this:
- clone the cloud-init template
- detach the root disk in the clone
- rename the clone's disk volume to a name containing the UUID
- request the clone's /config/current to search for the UUID in the unused disks
- move_disk from clone.unused to targetVM.virtio0


Yet another suggestion:
- Do it like you said in move_disk, preserving the suffix and just changing
the vmid.
- Add a rename_disk endpoint at the guest level that is aware of the VM config
and patches it accordingly so nothing breaks.
What do you think about that?

> If we choose a guest-level approach, it should also be implemented for
> containers for consistency.

> Regarding your approach:
> 
> It'd make sense to support using the same VM ID if target-filename is
> specified.

> 
> Also, there is an issue when the file name doesn't match the usual
> naming convention:

Sure, I can do that if you decide on it.

-- 
Best Regards,
Markus Ebner


[1] https://bugzilla.proxmox.com/show_bug.cgi?id=5038
--- End Message ---
___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH manager v3 3/5] api/acme: deprecate tos endpoint in favor of meta

2023-11-13 Thread Thomas Lamprecht
Am 31/10/2023 um 10:05 schrieb Folke Gleumes:
> The ToS endpoint ignored data that is needed to detect if EAB needs to
> be used. Instead of adding a new endpoint that does the same request,
> the tos endpoint is deprecated and replaced by the meta endpoint,
> that returns all information returned by the directory.
> 
> Signed-off-by: Folke Gleumes 
> ---
> No changes in v3
> 
>  PVE/API2/ACMEAccount.pm | 56 -
>  1 file changed, 55 insertions(+), 1 deletion(-)
> 
> diff --git a/PVE/API2/ACMEAccount.pm b/PVE/API2/ACMEAccount.pm
> index ec4eba24..bc45d5ab 100644
> --- a/PVE/API2/ACMEAccount.pm
> +++ b/PVE/API2/ACMEAccount.pm

> +returns => {
> + type => 'object',
> + additionalProperties => 1,
> + properties => {
> + termsOfService => {
> + type => 'string',
> + optional => 1,
> + description => 'ACME TermsOfService URL.',

nit: we normally place the description at the top, and sometimes as
second item after "type", but at the bottom is rather unusual.

> + },
> + externalAccountRequired => {
> + type => 'boolean',
> + optional => 1,
> + description => 'EAB Required'
> + },
> + website => {
> + type => 'string',
> + optional => 1,
> + description => 'URL to more information about the ACME server.'
> + },
> + caaIdentities => {
> + type => 'string',

but the RFC says that this is an array, and we do not actually mangle this into
a plain string anywhere FWICT; checking the API response would agree on that
too.

So, can you re-check and mark this as array in a follow-up?

> + optional => 1,
> + description => 'Hostnames referring to the ACME servers.'



> + },



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH acme v3 0/5] fix #4497: add support for external account bindings

2023-11-13 Thread Thomas Lamprecht
Am 31/10/2023 um 10:05 schrieb Folke Gleumes:
> Changes since v2:
>  * reverted the new_account abi to be non breaking
> 
> Changes since v1:
>  * fixed nit's
>  * expanded meta endpoint by all return values defined in the rfc
>  * expanded new_account signature by field for eab credentials
>  * allow for eab even if not required
> 
> This patch series adds functionality to use acme directiories
> that require the use of external account binding, as specified
> in rfc 8555 section 7.3.4.
> 
> To avoid code duplication and redundant calls to the CA,
> the `/cluster/acme/tos` endpoint has been deprecated and
> it's function will be covered by the new `/cluster/acme/meta`
> endpoint, which exposes all meta information provided by the CA,
> including the flag indicating that EAB needs to be used.
> The underlying call to the CA remains the same.
> 
> The CLI interface will only ask for the EAB credentials if needed,
> similar to how it works for the ToS.
> 
> The patches have been tested to work with and without EAB
> by using pebble [0] as the CA.
> 
> [0] https://github.com/letsencrypt/pebble
> 
> 
> acme: Folke Gleumes (1):
>   fix #4497: add support for external account bindings
> 
>  src/PVE/ACME.pm | 48 ++--
>  1 file changed, 42 insertions(+), 6 deletions(-)
> 
> 
> manager: Folke Gleumes (4):
>   fix #4497: acme: add support for external account bindings
>   api/acme: deprecate tos endpoint in favor of meta
>   fix #4497: cli/acme: detect eab and ask for credentials
>   ui/acme: switch to new meta endpoint
> 
>  PVE/API2/ACMEAccount.pm   | 83 ++-
>  PVE/CLI/pvenode.pm| 26 +++-
>  www/manager6/node/ACME.js | 12 --
>  3 files changed, 113 insertions(+), 8 deletions(-)
> 


applied series with Fabian's R-b and T-b trailers, thanks!

But I commented w.r.t. return schema on patch 3/5, please check that out.

And for some commits I'd have slightly favored splitting the rework and the
addition of the new feature into two separate commits, like the proxmox-acme
one, but no biggie.


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH pve-storage] fix #1611: implement import of base-images for LVM-thin Storage

2023-11-13 Thread Hannes Dürr



On 11/3/23 11:39, Fiona Ebner wrote:
[...]

This essentially duplicates most of the same function in the parent
plugin, i.e. LVMPlugin. What you can do to avoid it, is introduce new
helper functions for the parts that are different, call those in
LVMPlugin's implementation and overwrite the helpers appropriately in
LvmThinPlugin. Then call the parent plugin's function here and do the
base conversion at the end.

yes, it makes sense to work with the LVMPlugin implementation
rather than duplicating all the code, good point.

+sub volume_import {

(...)


+
+# Request new vm-name which is needed for the import
+if ($isBase) {
+   my $newvmname = $class->find_free_diskname($storeid, $scfg, $vmid);
+   $name = $newvmname;
+   $volname = $newvmname;
+}

So this is one of the parts that needs to be different.

You could also just set the name to undef and alloc_image will call
find_free_diskname itself. Might be slightly nicer and avoids a new
find_free_diskname call. We could also set the name to vm-XYZ-disk-N
instead of base-XYZ-disk-N for the allocation I think. Or the whole
function could even be:

1. check if base
2. check if already exists/rename allowed
3. call parent plugin's volume_import function passing along
vm-XYZ-disk-N (or undef if it should be renamed) instead of base-XYZ-disk-N
4. if it was a base image, handle the conversion to base image

Then there's no need for new helpers either. But this is just a sketch
of course, didn't think through all details. What do you think?
that sounds good so far, but you can't get around the
"find_free_diskname()" call, can you?
Not specifying a name leads to an error in the volume_import of the LVM
plugin, because without a name no volname can be parsed, and to get a new
name in the style of "vm-XYZ-disk-N" you need "find_free_diskname()" to
make sure that the name does not yet exist,

or am I missing something ?

So I'd propose:

1. check if base
2. check if already exists/rename allowed
3. call parent plugin's volume_import function, passing along
vm-XYZ-disk-N generated with "find_free_diskname()", instead of
base-XYZ-disk-N

4. if it was a base image, handle the conversion to base image



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH i18n] update Arabic translation

2023-11-13 Thread Thomas Lamprecht
Am 13/11/2023 um 11:14 schrieb Moayad Almalat:
> update Arabic translation
> 
> Signed-off-by: Moayad Almalat 
> 
> ---
>  ar.po | 961 +++---
>  1 file changed, 380 insertions(+), 581 deletions(-)
> 
>

applied, thanks!


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied-series: [PATCH v2 pve-manager 0/2] add bulk suspend

2023-11-13 Thread Thomas Lamprecht
Am 13/11/2023 um 11:20 schrieb Hannes Laimer:
> Adds support for bulk suspending VMs as it already exists for stop.
> 
> v2, thanks @Thomas:
> * api: skip CTs + fix permission checks
> * ui: disable `suspendall` button if user isn't allowed to use it
> * ui: use new bulk action filtering
> 
> Hannes Laimer (2):
>   api: add suspendall endpoint
>   ui: add bulk suspend support
> 
>  PVE/API2/Nodes.pm | 124 ++
>  www/manager6/Utils.js |   1 +
>  www/manager6/node/CmdMenu.js  |  15 
>  www/manager6/node/Config.js   |  14 
>  www/manager6/window/BulkAction.js |   5 +-
>  5 files changed, 157 insertions(+), 2 deletions(-)
> 


applied series, thanks!

I made the "passed VMID of a CT" case use log_warn though, so it's more
visible that something odd was requested.

And UX-wise it could be nicer if we had "fixed" filters for the
store, as currently one can easily hit the "Clear Filters" button and
then can select all CTs (or stopped VMs) without any hint that suspension
for these won't work.

But it's not a biggie and I did not want to block on that, so this is just
for the record that we can probably improve on this.


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH pve-storage] fix #1611: implement import of base-images for LVM-thin Storage

2023-11-13 Thread Fiona Ebner
Am 13.11.23 um 14:13 schrieb Hannes Dürr:
> 
> On 11/3/23 11:39, Fiona Ebner wrote:
>> 1. check if base
>> 2. check if already exists/rename allowed
>> 3. call parent plugin's volume_import function passing along
>> vm-XYZ-disk-N (or undef if it should be renamed) instead of
>> base-XYZ-disk-N
>> 4. if it was a base image, handle the conversion to base image
>>
>> Then there's no need for new helpers either. But this is just a sketch
>> of course, didn't think through all details. What do you think?
> that sounds good so far, but you can't get around the
> "find_free_diskspace()" call, can you ?
> Not specifying a name leads to an error in the volume_import of the LVM
> plugin,

Oh, I see.

> because without a name no plugin can be parsed and to get a new name in
> the style
> of "vm-XYZ-disk-N" you need "find_free_diskspace()" to make sure that
> the name does not yet exist,
> or am I missing something ?
> > So I'd propose:
> 
> 1. check if base
> 2. check if already exists/rename allowed
> 3. call parent plugin's volume_import function passing along
> vm-XYZ-disk-N generated with "find_free_diskname()", instead of
> base-XYZ-disk-N

Yes. And while not super important, we might want to preserve the N in
the suffix (or custom disk names). This can be done by only calling
find_free_diskname() if an image with the name already exists and the
image can be renamed. Otherwise, we can just pass the name with "base-"
replaced by "vm-" instead of invoking find_free_diskname().

> 4. if it was a base image, handle the conversion to base image
> 
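
Putting those steps together, LvmThinPlugin::volume_import could end up
looking roughly like the sketch below (the parameter list is abbreviated
via @rest, and reusing create_base() unchanged for the final conversion
is an assumption; this is not a finished patch):

# rough sketch, not a finished patch
sub volume_import {
    my ($class, $scfg, $storeid, $fh, $volname, @rest) = @_;

    my $is_base = $volname =~ m/^base-/;

    # steps 1-3: import under a vm-* name and let the parent (LVM) plugin
    # handle allocation and the exists/rename check, falling back to
    # find_free_diskname() only when the name is already taken
    (my $import_name = $volname) =~ s/^base-/vm-/;
    my $volid = $class->SUPER::volume_import($scfg, $storeid, $fh, $import_name, @rest);

    # step 4: if the source was a base image, convert the imported volume back
    if ($is_base) {
        my (undef, $imported_name) = split(':', $volid, 2);
        $volid = "$storeid:" . $class->create_base($storeid, $scfg, $imported_name);
    }

    return $volid;
}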


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager] acme: mark caaIdentities as an array

2023-11-13 Thread Folke Gleumes
caaIdentities was mistakenly labeled as a string in a previous patch and
not as an array of strings, as it is defined in the rfc [0].

[0] https://datatracker.ietf.org/doc/html/rfc8555#section-7.1.1

Signed-off-by: Folke Gleumes 
---

This is a followup to Thomas' correction regarding the metadata sent by
the CA [1]

[1] https://lists.proxmox.com/pipermail/pve-devel/2023-November/060029.html

 PVE/API2/ACMEAccount.pm | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/PVE/API2/ACMEAccount.pm b/PVE/API2/ACMEAccount.pm
index 93820ec4..3e3cbd32 100644
--- a/PVE/API2/ACMEAccount.pm
+++ b/PVE/API2/ACMEAccount.pm
@@ -402,7 +402,10 @@ __PACKAGE__->register_method ({
},
caaIdentities => {
description => 'Hostnames referring to the ACME servers.',
-   type => 'string',
+   type => 'array',
+   items => {
+   type => 'string',
+   },
optional => 1,
},
},
-- 
2.39.2



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH v4 common 1/2] tools: Add mknod syscall

2023-11-13 Thread Thomas Lamprecht
Am 13/11/2023 um 11:30 schrieb Filip Schauer:
> Signed-off-by: Filip Schauer 
> ---
>  src/PVE/Syscall.pm | 1 +
>  src/PVE/Tools.pm   | 5 +
>  2 files changed, 6 insertions(+)
> 
>

applied this one already, thanks!


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH v4 common 2/2] tools: Add mount flag constants

2023-11-13 Thread Thomas Lamprecht
Am 13/11/2023 um 11:30 schrieb Filip Schauer:
> Signed-off-by: Filip Schauer 
> ---
>  src/PVE/Tools.pm | 31 +++
>  1 file changed, 31 insertions(+)
> 
>

applied this one already too, thanks!

Albeit I'm not too happy with having those constants in PVE::Tools
in the first place, there are so many pre-existing ones that
yours don't really make it worse. And just inventing some new
module that takes only yours is not that good either; we need
to clean this up as a whole, but that's for a separate, future
patch series.


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH manager] acme: mark caaIdentities as an array

2023-11-13 Thread Thomas Lamprecht
Am 13/11/2023 um 15:11 schrieb Folke Gleumes:
> caaIdentities was mistakenly labeled as a string in a previous patch and
> not as an array of strings, as it is defined in the rfc [0].
> 
> [0] https://datatracker.ietf.org/doc/html/rfc8555#section-7.1.1
> 
> Signed-off-by: Folke Gleumes 
> ---
> 
> This is a followup to Thomas correction, regarding the metadata sent by
> the CA[1]
> 
> [1] https://lists.proxmox.com/pipermail/pve-devel/2023-November/060029.html
> 
>  PVE/API2/ACMEAccount.pm | 5 -
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
>

applied, thanks!


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH many 00/27] overhaul notification system, use matchers instead of filters

2023-11-13 Thread Dominik Csapak

a few high level ui things
(i did not look too deeply in the code, but i'll probably send
some comments there too)

that probably was already there, but i find the all/any + invert combination
confusing (i had to think about it for a bit before getting a grasp on it)

i would propose we write out what the four options mean
and internally convert them to all/any + invert, e.g.

'all rules match'
'any rule matches'
'at least one rule does not match' (all + invert)
'no rule matches' (any + invert)

(also is the change from and/or to all/any not a breaking change?,
did we expose this in the api yet ?)

second, we already have a very similar interface in the guest wizard for
the disks, and there we have the remove button inline,
i guess we should keep the style consistent

third, do we really need the tree? we basically have the
four options from above, and then a list of the matches

wouldn't it be enough to separate them?

e.g. have a dropdown with the four options + a list instead of a tree?

also currently the match options are not really verified before
i can submit the form


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH many 00/27] overhaul notification system, use matchers instead of filters

2023-11-13 Thread Thomas Lamprecht
Am 13/11/2023 um 15:34 schrieb Dominik Csapak:
> (also is the change from and/or to all/any not a breaking change?,
> did we expose this in the api yet ?)

there are lots of breaking changes, it was applied a bit too early (reviewer
"fault", not dev one) but only ever exposed on pvetest, so Lukas got my
OK to break this, which is most of the time fine for pvetest stuff
anyway; after all it's a testing ground and we should not make our life
harder than it is (configs get migrated best-effort wise).


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH many 00/27] overhaul notification system, use matchers instead of filters

2023-11-13 Thread Lukas Wagner

Hi, thanks for your input.

On 11/13/23 15:34, Dominik Csapak wrote:

a few high level ui things
(i did not look too deeply in the code, but i'll send
probably some comments there too)

Just as a warning, the tree code/data binding is definitely not as clean 
as it could be right now, a cleanup patch will follow. I just wanted to 
get these patches out ASAP ;)


that probably was already there, but i find the all/any + invert 
combination

confusing (i had to think about it for a bit before getting a grasp on it)

i would propose we can write the four options out what they mean
and internally convert them to all/any + invert, e.g.

'all rule match'
'any rule match'
'at least one rule does not match' (all + invert)
'no rule matches' (any + invert)


I was considering this as well and discussed both approaches with Aaron.
He was slightly in favor of the one I implemented, so I went with that.
But now that I think about it again I think I like this version better,
I'll send a followup for this (since this is purely cosmetic).


(also is the change from and/or to all/any not a breaking change?,
did we expose this in the api yet ?)


Well, the switch-over from filters to matchers is breaking as a whole, 
but it only ever hit pvetest, it is not a big problem.

(config parsing was adapted so that it will clean out 'old' entries)
The notification stuff was merged a bit too eagerly, this rework 
attempts to fix some of the issues with the first approach.


second, we already have a very similar interface in the guest wizard for
the disks, and there we have the remove button inline,
i guess we should keep the style consistent

Noted, i'll put it on my list for followups.



third, do we really need the tree? we basically have the
four options from above, and then a list of the matches

wouldn't it be enough to seperate them?


The tree was a conscious decision, because at some point I want to 
support 'sub-matchers' - where a matcher can reference another matcher. 
That way users can compose arbitrary boolean formulas, e.g.:


  All of
match ...
match ...
any of
match ...
match ...

For best UX, the sub-matchers shall be configurable within a single 
dialog, and this is the reason why I went with a tree widget.





e.g. have a dropdown with the four options + a list instead of a tree?

also currently the match options are not really verified before
i can submit the form



True, will fix this in a followup.

Thx again!

--
- Lukas


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH pve-docs] sdn: update documentation

2023-11-13 Thread Stefan Lendl
Try to homogenize style and format
Title case naming conventions for configuration options
Simplify examples
Re-phrase descriptions

Signed-off-by: Stefan Lendl 
---

Notes:
Used single line heading style

The multi line heading format just does not work with syntax
highlighting in my editor. The generated HTML from adoc is identical.

 pvesdn.adoc | 947 ++--
 1 file changed, 408 insertions(+), 539 deletions(-)

diff --git a/pvesdn.adoc b/pvesdn.adoc
index aadd8dc..600904d 100644
--- a/pvesdn.adoc
+++ b/pvesdn.adoc
@@ -1,6 +1,6 @@
 [[chapter_pvesdn]]
-Software Defined Network
-
+= Software Defined Network
+
 ifndef::manvolnum[]
 :pve-toplevel:
 endif::manvolnum[]
@@ -14,19 +14,16 @@ xref:getting_help[mailing lists or in the forum] for 
questions and feedback.
 
 
 [[pvesdn_installation]]
-Installation
-
+== Installation
 
-To enable the experimental Software Defined Network (SDN) integration, you need
-to install the `libpve-network-perl` and `ifupdown2` packages on every node:
+To enable the _tech-preview_ SDN integration, you need to install the
+`libpve-network-perl` packages on every node:
 
 
 apt update
-apt install libpve-network-perl ifupdown2
+apt install libpve-network-perl
 
 
-NOTE: {pve} version 7 and above come installed with ifupdown2.
-
 After this, you need to add the following line to the end of the
 `/etc/network/interfaces` configuration file, so that the SDN configuration 
gets
 included and activated.
@@ -36,260 +33,266 @@ source /etc/network/interfaces.d/*
 
 
 
-Basic Overview
---
+[[pvesdn_overview]]
+== Overview
 
 The {pve} SDN allows for separation and fine-grained control of virtual guest
 networks, using flexible, software-controlled configurations.
 
-Separation is managed through zones, where a zone is its own virtual separated
-network area. A 'VNet' is a type of a virtual network connected to a zone.
-Depending on which type or plugin the zone uses, it can behave differently and
-offer different features, advantages, and disadvantages. Normally, a 'VNet'
-appears as a common Linux bridge with either a VLAN or 'VXLAN' tag, however,
-some can also use layer 3 routing for control. 'VNets' are deployed locally on
-each node, after being configured from the cluster-wide datacenter SDN
-administration interface.
+Separation is managed through *zones*, virtual networks (*VNets*), and
+*subnets*.  A zone is its own virtually separated network area.  A VNet is a
+virtual network that belongs to a zone. A subnet is an IP range inside a VNet.
+
+Depending on the type of the zone, the network behaves differently and offers
+specific features, advantages, and limitations.
 
+Use cases for SDN range from an isolated private network on each individual 
node
+to complex overlay networks across multiple PVE clusters on different 
locations.
 
-Main Configuration
-~~
+After configuring an VNet in the cluster-wide datacenter SDN administration
+interface, it is available as a common Linux bridge, locally on each node, to 
be
+assigned to VMs and Containers.
 
-Configuration is done at the datacenter (cluster-wide) level and is saved in
-files located in the shared configuration file system:
-`/etc/pve/sdn`
 
-On the web-interface, SDN features 3 main sections:
+[[pvesdn_main_configuration]]
+== Main Configuration
 
-* SDN: An overview of the SDN state
+Configuration is done at the web UI at datacenter level and is saved in files
+located in the shared configuration file system at `/etc/pve/sdn`.
 
-* Zones: Create and manage the virtually separated network zones
+On the web interface, SDN features the following sections:
 
-* VNets: Create virtual network bridges and manage subnets
+* xref:pvesdn_config_main_sdn[SDN]:: An overview of the SDN state
 
-In addition to this, the following options are offered:
+* xref:pvesdn_config_zone[Zones]: Create and manage the virtually separated
+  network zones
 
-* Controller: For controlling layer 3 routing in complex setups
+* xref:pvesdn_config_vnets[VNets] VNets: Create virtual network bridges and
+  manage subnets
 
-* Subnets: Used to defined IP networks on VNets
+The Options category allows adding and managing additional services to be used
+in your SDN setup.
 
-* IPAM: Enables the use of external tools for IP address management (guest
-  IPs)
+* xref:pvesdn_config_controllers[Controllers]: For controlling layer 3 routing
+  in complex setups
 
-* DNS: Define a DNS server API for registering virtual guests' hostname and IP
+* xref:pvesdn_config_ipam[IPAM]: Enables external for IP address management for
+  guests
+
+* xref:pvesdn_config_dns[DNS]: Define a DNS server integration for registering
+  virtual guests' hostname and IP
   addresses
 
-[[pvesdn_config_main_sdn]]
 
-SDN
-~~~
+[[pvesdn_config_main_sdn]]
+=== SDN
 
 This is the main status panel. Here you can see the deployment status of zones
 on differe

Re: [pve-devel] [PATCH proxmox-widget-toolkit 25/27] notification: matcher: add UI for matcher editing

2023-11-13 Thread Dominik Csapak

in general my biggest issue with this patch is that
the various functions/formulas/handlers are way too far
away from the place they live/get used/etc.

in general i'd try to use the 'controller+viewmodel' pattern consistently,
so have a controller (where the logic (methods+handlers) lives), the
view (where the structure of the layout lives),
and if necessary the viewmodel (where the data+formulas live)

otherwise not a very deep review, but some comments inline nonetheless:

On 11/7/23 11:18, Lukas Wagner wrote:

This modifies the old filter edit window in the following ways:
   - Split content into multiple panels
 - Name and comment in the first tab
 - Match rules in a tree-structure in the second tab
 - Targets to notify in the third tab

Signed-off-by: Lukas Wagner 
---

Notes:
 The code binding the match rule tree structure to the editable fields
 could definitely be a bit cleaner. I think this is the first time that
 we have used such a pattern, so there there was much experimentation
 needed to get this working.
 I plan to revisit it and clean up a bit later, I wanted to get
 the notification system changes on the list ASAP.

  src/window/NotificationMatcherEdit.js | 867 --
  1 file changed, 820 insertions(+), 47 deletions(-)

diff --git a/src/window/NotificationMatcherEdit.js 
b/src/window/NotificationMatcherEdit.js
index a014f3e..c6f0726 100644
--- a/src/window/NotificationMatcherEdit.js
+++ b/src/window/NotificationMatcherEdit.js
@@ -1,6 +1,6 @@
-Ext.define('Proxmox.panel.NotificationMatcherEditPanel', {
+Ext.define('Proxmox.panel.NotificationMatcherGeneralPanel', {
  extend: 'Proxmox.panel.InputPanel',
-xtype: 'pmxNotificationMatcherEditPanel',
+xtype: 'pmxNotificationMatcherGeneralPanel',
  mixins: ['Proxmox.Mixin.CBind'],
  
  items: [

@@ -15,53 +15,27 @@ Ext.define('Proxmox.panel.NotificationMatcherEditPanel', {
allowBlank: false,
},
{
-   xtype: 'proxmoxKVComboBox',
-   name: 'min-severity',
-   fieldLabel: gettext('Minimum Severity'),
-   value: null,
+   xtype: 'proxmoxtextfield',
+   name: 'comment',
+   fieldLabel: gettext('Comment'),
cbind: {
deleteEmpty: '{!isCreate}',
},
-   comboItems: [
-   ['info', 'info'],
-   ['notice', 'notice'],
-   ['warning', 'warning'],
-   ['error', 'error'],
-   ],
-   triggers: {
-   clear: {
-   cls: 'pmx-clear-trigger',
-   weight: -1,
-   hidden: false,
-   handler: function() {
-   this.setValue('');
-   },
-   },
-   },
-   },
-   {
-   xtype: 'proxmoxcheckbox',
-   fieldLabel: gettext('Invert match'),
-   name: 'invert-match',
-   uncheckedValue: 0,
-   defaultValue: 0,
-   cbind: {
-   deleteDefaultValue: '{!isCreate}',
-   },
},
+],
+});
+
+Ext.define('Proxmox.panel.NotificationMatcherTargetPanel', {
+extend: 'Proxmox.panel.InputPanel',
+xtype: 'pmxNotificationMatcherTargetPanel',
+mixins: ['Proxmox.Mixin.CBind'],
+
+items: [
{
xtype: 'pmxNotificationTargetSelector',
name: 'target',
allowBlank: false,
},
-   {
-   xtype: 'proxmoxtextfield',
-   name: 'comment',
-   fieldLabel: gettext('Comment'),
-   cbind: {
-   deleteEmpty: '{!isCreate}',
-   },
-   },
  ],
  });
  
@@ -74,7 +48,7 @@ Ext.define('Proxmox.window.NotificationMatcherEdit', {

labelWidth: 120,
  },
  
-width: 500,

+width: 700,
  
  initComponent: function() {

let me = this;
@@ -97,12 +71,38 @@ Ext.define('Proxmox.window.NotificationMatcherEdit', {
me.subject = gettext('Notification Matcher');
  
  	Ext.apply(me, {

-   items: [{
-   name: me.name,
-   xtype: 'pmxNotificationMatcherEditPanel',
-   isCreate: me.isCreate,
-   baseUrl: me.baseUrl,
-   }],
+   bodyPadding: 0,
+   items: [
+   {
+   xtype: 'tabpanel',
+   region: 'center',
+   layout: 'fit',
+   bodyPadding: 10,
+   items: [
+   {
+   name: me.name,
+   title: gettext('General'),
+   xtype: 'pmxNotificationMatcherGeneralPanel',
+   isCreate: me.isCreate,
+   baseUrl: me.baseUrl,
+   },
+   {
+   name: me.name,
+   title: gettext('Match Rules'),
+   xtype: 'pmxN

[pve-devel] applied: [PATCH v2 pve-manager 1/1] ui: ceph status: add pg warning state

2023-11-13 Thread Thomas Lamprecht
Am 23/06/2023 um 11:45 schrieb Alexandre Derumier:
> Like ceph mgr dashboard, we need a warning state.
> 
> - set degraded as warning instead working
> - set undersized as warning instead error
> - rename error as critical
> - add "working" (info-blue) color for working state
> - use warning (orange) color for warning state
> 
> Signed-off-by: Alexandre Derumier 
> ---
>  www/manager6/ceph/StatusDetail.js | 31 +++
>  1 file changed, 19 insertions(+), 12 deletions(-)
> 
>

applied, thanks!

I folded the CSS class into this patch, to be added to pve-manager's
CSS file for now, and renamed "Working" to "Busy", which IMO conveys
slightly better what we mean here.


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH access-control 1/2] acl: allow more nesting for /mapping acl paths

2023-11-13 Thread Thomas Lamprecht
Am 10/11/2023 um 09:47 schrieb Lukas Wagner:
> I don't have any strong preference for any form, I just think
> that some consistency with the API would be nice - and changing
> the API routes would be much more work ;)

hehe OK, as said, it doesn't matter that much so I'm fine with whatever
you prefer here.

> 
> And regarding the granularity: Yes, maybe that's a bit overkill now. The 
> per-target permissions were kind of important with the 'old' system 
> where we would select a target at the notification call site (e.g. a 
> backup job), but with the new 'pub-sub'-alike system it probably does 
> not matter that much any more. But I don't really have any strong 
> preference here as well.
> 

FWIW, we can extend the system with the next major release, but it's
almost impossible to shrink it again without causing a bigger fallout
(if it's used at all, that is), so just from that, starting out with a
smaller scope would be better. But here we would need to reduce the
scope quite a bit, i.e., having just /mappings/notifications left, as
otherwise there's no good way to (potentially) extend that in the future -
which (while better than root-only via configuring postfix now)
is definitely limited.

So yeah, whatever we do, this is something that can only be judged well
in retrospect through user feedback, so just go with your gut feeling,
I guess.


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [RFC series pve-network/pve-cluster/qemu-server] DHCP

2023-11-13 Thread DERUMIER, Alexandre
Hi Stefan !

On 11/13/23 11:04, Alexandre Derumier wrote:
> I have split the ipam add|del from the dhcp lease reservation.
> 
> The ipam add|del ip is done when creating|deleting vm, or add|del a
> vm nic
> 
> The dhcp reservation is done at vm start.
> 
> The delete of dhcp reservation is done at vm destroy.
> 
> (This can be easily extend for ephemeral ip)
> 
> At vm start, we search ip associated with mac address.

>>Thanks very much, that looks like a very good solution! From what I
>>can
>>tell migration would also work with this?

yes sure !  (and also remote migration to another cluster with same
external ipam)




> I have only implemented calls in qemu-server for now

>>Do you plan on checking out pve-container as well? Or should I look
>>into this?

I was waiting for comments before doing pve-container, but if you are ok,
no problem, I'll work on it.


I'll also try to improve the dnsmasq reload, as we don't always need to
reload it (if nothing changed), and at most once per vm start (currently
it's always done for each interface).

I haven't tested ipv6 with dnsmasq yet, but the ips are correctly
set in the ethers file.


>>I integrated it into my branch and will be giving it a test drive
now.

Thanks. I have spare time these next 2 weeks to help/fix bugs.


>>I've been working on a UI integration in the meanwhile and it's
>>getting
>>along quite well although it will need to cook for another day or
two.


I think it also needs an api to add dhcp-ranges to a subnet, as for an
external ipam like netbox, it needs to call the netbox api to add the ip range.

So, maybe a button in the subnet panel: "add dhcp-range", allowing to add
multiple ranges.



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH storage v3 1/1] api/filerestore: add 'tar' parameter to 'download' api

2023-11-13 Thread Thomas Lamprecht
Am 19/10/2023 um 11:13 schrieb Dominik Csapak:
> to be able to download 'tar.zst' archives
> 
> Signed-off-by: Dominik Csapak 
> ---
>  src/PVE/API2/Storage/FileRestore.pm | 9 -
>  1 file changed, 8 insertions(+), 1 deletion(-)
> 
>

applied, thanks!


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH widget-toolkit v3 1/1] window/FileBrowser: enable tar button by default

2023-11-13 Thread Thomas Lamprecht
Am 19/10/2023 um 11:13 schrieb Dominik Csapak:
> all endpoints now can handle the 'tar' parameter, so add it for all
> 
> Signed-off-by: Dominik Csapak 
> ---
>  src/window/FileBrowser.js | 6 +-
>  1 file changed, 1 insertion(+), 5 deletions(-)
> 
>

applied, thanks!


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH widget-toolkit v3 1/1] window/FileBrowser: enable tar button by default

2023-11-13 Thread Thomas Lamprecht
Am 19/10/2023 um 11:13 schrieb Dominik Csapak:
> diff --git a/src/window/FileBrowser.js b/src/window/FileBrowser.js
> index 4e4c639..e036d9f 100644
> --- a/src/window/FileBrowser.js
> +++ b/src/window/FileBrowser.js
> @@ -61,10 +61,6 @@ Ext.define("Proxmox.window.FileBrowser", {
>   'd': true, // directories
>   },
>  
> - // enable tar download, this will add a menu to the "Download" button when the selection
> - // can be downloaded as `.tar` files
> - enableTar: false,
> -

ps. isn't there a call-site where we had this set, i.e., that could be dropped 
now?



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [PATCH widget-toolkit v3 1/1] window/FileBrowser: enable tar button by default

2023-11-13 Thread Dominik Csapak

On 11/13/23 16:46, Thomas Lamprecht wrote:

Am 19/10/2023 um 11:13 schrieb Dominik Csapak:

diff --git a/src/window/FileBrowser.js b/src/window/FileBrowser.js
index 4e4c639..e036d9f 100644
--- a/src/window/FileBrowser.js
+++ b/src/window/FileBrowser.js
@@ -61,10 +61,6 @@ Ext.define("Proxmox.window.FileBrowser", {
'd': true, // directories
},
  
-	// enable tar download, this will add a menu to the "Download" button when the selection

-   // can be downloaded as `.tar` files
-   enableTar: false,
-


ps. isn't there a call-site where we had this set, i.e., that could be dropped 
now?



yes, but that's in proxmox-backup, should I already send that patch
or do we wait until we bump widget-toolkit?
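
For context, such a call site looks roughly like this - a hypothetical
example, not the actual proxmox-backup code:

    Ext.create('Proxmox.window.FileBrowser', {
        title: gettext('File Restore'),
        enableTar: true, // explicit opt-in; redundant once the default is true
        // ...remaining configuration (URLs, selection handling, ...) unchanged
        autoShow: true,
    });

The follow-up patch would then essentially just drop the now-redundant
enableTar line.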


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [RFC series pve-network/pve-cluster/qemu-server] DHCP

2023-11-13 Thread Stefan Hanreich



On 11/13/23 16:44, DERUMIER, Alexandre wrote:
> I think it also needs an api to add dhcp-ranges to a subnet, as for an
> external ipam like netbox, it needs to call the netbox api to add the ip range.
> 
> So, maybe a button in the subnet panel: "add dhcp-range", allowing to add
> multiple ranges.

Yes, that's what I've been working on. For now I have added a tab in the
Subnet edit dialogue where you can edit the DHCP Ranges. Additionally, I
have created a view to inspect the status of the PVE IPAM.

Also, I have added a checkbox for activating DHCP in the Zone edit dialogue.
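
As a rough sketch, such a checkbox in the zone edit input panel could look
like the following (field name and label are assumptions on my part, not
the actual implementation):

    {
        xtype: 'proxmoxcheckbox',
        name: 'dhcp',
        fieldLabel: gettext('Automatic DHCP'),
        uncheckedValue: 0,
        defaultValue: 0,
        cbind: {
            deleteDefaultValue: '{!isCreate}',
        },
    },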


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



Re: [pve-devel] [RFC qemu-server 2/5] vmnic add|remove : add|del ip in ipam

2023-11-13 Thread Stefan Hanreich



On 11/13/23 11:04, Alexandre Derumier wrote:
>  if ($have_sdn) {
> -PVE::Network::SDN::Dhcp::add_mapping($vmid, $net->{bridge}, 
> $net->{macaddr});
> +PVE::Network::SDN::Dhcp::add_mapping($net->{bridge}, $net->{macaddr});
>  
>  PVE::Network::SDN::Zones::tap_create($iface, $net->{bridge});
>  PVE::Network::SDN::Zones::tap_plug($iface, $net->{bridge}, $net->{tag}, 
> $net->{firewall}, $net->{trunks}, $net->{rate});

This call will fail if you have SDN enabled, but want to add a NIC on a
non-simple zone or a NIC on a Linux bridge. I have already fixed this
issue in my version of the patch series, so no need for you to do anything.


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied.series: [PATCH storage/widget-toolkit 0/2] List all OSDs on disk

2023-11-13 Thread Thomas Lamprecht
Am 22/08/2023 um 11:04 schrieb Aaron Lauterer:
> It is possible to have multiple OSD daemons on a single disk. This is
> useful if fast NVME drives are used to utilize their full potential.
> 
> For these situations we want to list all OSD daemons that are located on
> the disk in the disk panel of the node.
> 
> I added a new 'osdid-list' parameter to the Disks GET API endpoint for
> this.
> We can probably deprecate/remove the 'osdid' the next time we can
> introduce breaking API changes.
> 
> storage: Aaron Lauterer (1):
>   disks: get: add osdid-list return parameter
> 
>  src/PVE/API2/Disks.pm |  6 +++-
>  src/PVE/Diskmanage.pm | 10 ++
>  .../disk_tests/cciss/disklist_expected.json   |  1 +
>  .../hdd_smart/disklist_expected.json  |  2 ++
>  .../nvme_smart/disklist_expected.json |  1 +
>  .../disk_tests/sas/disklist_expected.json |  1 +
>  .../disk_tests/sas_ssd/disklist_expected.json |  1 +
>  .../ssd_smart/disklist_expected.json  |  5 +++
>  .../disk_tests/usages/disklist_expected.json  | 32 +--
>  9 files changed, 49 insertions(+), 10 deletions(-)
> 
> 
> widget-toolkit: Aaron Lauterer (1):
>   DiskList: render osdid-list if present
> 
>  src/panel/DiskList.js | 6 +-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 


applied series, thanks!
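
On the widget-toolkit side, rendering the new field can be imagined roughly
like this - a sketch that assumes the disk record carries both the old
'osdid' and the new 'osdid-list' fields, not the literal applied code:

    // fall back to the single 'osdid' value for backends that do not
    // (yet) return the new 'osdid-list' field
    function renderOsdIds(value, metaData, rec) {
        let list = rec.data['osdid-list'];
        if (list && list.length > 0) {
            return list.join(', ');
        }
        return value ?? '';
    }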

___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH docs/qemu-server 0/2] Ballooning/PCI passthrough incompatibility: add warning and docs

2023-11-13 Thread Friedrich Weber
When using PCI(e) passthrough, enabling ballooning for a VM (as in, setting a
"minimum memory" amount that is smaller than "memory") does not have any
effect, which may be surprising to users [0].

Patch #1 adds a note about this incompatibility to the PCI(e) passthrough and
ballooning sections of the docs.

Patch #2 implements a warning on VM start if PCI(e) passthrough and
ballooning are both enabled.

[0] https://forum.proxmox.com/threads/134202/
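
In config terms, the recommendation from patch #1 boils down to something
like the following VM config excerpt (illustrative values only; "minimum
memory" in the GUI corresponds to the 'balloon' key):

    # excerpt of an illustrative /etc/pve/qemu-server/<vmid>.conf
    memory: 8192
    balloon: 8192             # same as 'memory', so no ballooning is expected
    hostpci0: 0000:01:00.0,pcie=1

With balloon equal to memory, the new warning from patch #2 is not triggered.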



docs:

Friedrich Weber (1):
  pci passthrough: mention incompatibility with ballooning

 qm-pci-passthrough.adoc | 10 ++
 qm.adoc |  5 +
 2 files changed, 15 insertions(+)


qemu-server:

Friedrich Weber (1):
  vm start: warn if using ballooning and PCI(e) passthrough

 PVE/QemuServer.pm | 10 ++
 1 file changed, 10 insertions(+)


Summary over all repositories:
  3 files changed, 25 insertions(+), 0 deletions(-)

-- 
murpp v0.4.0



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH docs 1/2] pci passthrough: mention incompatibility with ballooning

2023-11-13 Thread Friedrich Weber
When using PCI(e) passthrough, setting a minimum amount of memory does
not have any effect, which may be surprising to users [0]. Add a note
to the PCI(e) passthrough section, and reference it in the ballooning
section.

[0] https://forum.proxmox.com/threads/134202/

Signed-off-by: Friedrich Weber 
---
 qm-pci-passthrough.adoc | 10 ++
 qm.adoc |  5 +
 2 files changed, 15 insertions(+)

diff --git a/qm-pci-passthrough.adoc b/qm-pci-passthrough.adoc
index 1f68721..406156d 100644
--- a/qm-pci-passthrough.adoc
+++ b/qm-pci-passthrough.adoc
@@ -20,6 +20,16 @@ PCI speeds. Passing through devices as PCIe just sets a flag 
for the guest to
 tell it that the device is a  PCIe device instead of a "really fast legacy PCI
 device". Some guest applications benefit from this.
 
+xref:qm_ballooning[Automatic memory allocation (ballooning)] is not possible
+when using PCI(e) passthrough. As the PCI device may use DMA (Direct Memory
+Access), QEMU needs to map the complete guest memory on VM startup. Hence, the
+QEMU process will use at least the (maximum) configured amount of VM memory and
+setting a minimum amount does not have any effect. When using PCI(e)
+passthrough, it is recommended to set memory and minimum memory to the same
+amount and keep the ballooning device enabled. However, keep in mind that the
+memory consumption reported in the GUI for the VM may be much lower than the
+memory consumption of the QEMU process.
+
 General Requirements
 
 
diff --git a/qm.adoc b/qm.adoc
index b7938d7..cbc5e0d 100644
--- a/qm.adoc
+++ b/qm.adoc
@@ -623,6 +623,7 @@ it (like for debugging purposes), simply uncheck 
*Ballooning Device* or set
 
 in the configuration.
 
+[[qm_ballooning]]
 .Automatic Memory Allocation
 
 // see autoballoon() in pvestatd.pm
@@ -659,6 +660,10 @@ systems.
 When allocating RAM to your VMs, a good rule of thumb is always to leave 1GB
 of RAM available to the host.
 
+When using PCI(e) passthrough, setting a minimum amount of memory does not have
+any effect; see the section on xref:qm_pci_passthrough[PCI(e) passthrough] for
+more details.
+
 
 [[qm_network_device]]
 Network Device
-- 
2.39.2



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] [PATCH qemu-server 2/2] vm start: warn if using ballooning and PCI(e) passthrough

2023-11-13 Thread Friedrich Weber
If a VM uses PCI(e) passthrough, ballooning does not work as expected:
The QEMU process will always consume the full memory amount given in
`memory`, even if `balloon` is set to a smaller (non-zero) amount. The
reason is that the PCI device might use DMA, so QEMU needs to map the
complete guest memory on startup. However, users may not be aware of
that (see e.g. [0]).

To make users aware of the limitation, warn on VM start if at least
one PCI device is passed through and ballooning is enabled (and
`balloon` != `memory`).

[0] https://forum.proxmox.com/threads/134202/

Signed-off-by: Friedrich Weber 
---

Notes:
I did not test this on a "real" PCI passthrough setup as I don't have
one at hand, but Markus tested (an earlier version of) this patch on
his machine.

 PVE/QemuServer.pm | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index dbcd568..70983a4 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -5789,6 +5789,16 @@ sub vm_start_nolock {
die $err;
 }
 
+if (
+   scalar(%$pci_devices)
+   && defined($conf->{balloon})
+   && $conf->{balloon} != 0
+   && $conf->{balloon} != $memory
+) {
+   log_warn("Ballooning is not possible when using PCI(e) passthrough, "
+   ."VM will use maximum configured memory ($memory MiB).\n");
+}
+
 PVE::Storage::activate_volumes($storecfg, $vollist);
 
 
-- 
2.39.2



___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel



[pve-devel] applied: [PATCH proxmox-widget-toolkit 1/1] apt: drop ChangeLogUrl

2023-11-13 Thread Thomas Lamprecht
Am 04/07/2023 um 11:45 schrieb Fabian Grünbichler:
> it's not returned anymore by the corresponding backends, since fetching
> changelogs is now fully delegated to `apt`.
> 
> Signed-off-by: Fabian Grünbichler 
> ---
> 
> Notes:
> could benefit from a Breaks on old versions of pve-manager/pmg-api ,
> but not strictly required, it will simply allow attempting to fetch
> changelogs for packages where the backend already said there is none
> 
>  src/node/APT.js | 8 
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
>

applied, thanks!

Not directly related, but I get 404 not found for all Debian origin updates that
are currently pending on a Proxmox VE 8 VM of mine (e.g., for ffmpeg)


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] applied: [PATCH proxmox-widget-toolkit 1/1] apt: drop ChangeLogUrl

2023-11-13 Thread Fabian Grünbichler
On November 13, 2023 6:18 pm, Thomas Lamprecht wrote:
> Am 04/07/2023 um 11:45 schrieb Fabian Grünbichler:
>> it's not returned anymore by the corresponding backends, since fetching
>> changelogs is now fully delegated to `apt`.
>> 
>> Signed-off-by: Fabian Grünbichler 
>> ---
>> 
>> Notes:
>> could benefit from a Breaks on old versions of pve-manager/pmg-api ,
>> but not strictly required, it will simply allow attempting to fetch
>> changelogs for packages where the backend already said there is none
>> 
>>  src/node/APT.js | 8 
>>  1 file changed, 4 insertions(+), 4 deletions(-)
>> 
>>
> 
> applied, thanks!
> 
> Not directly related, but I get 404 not found for all Debian origin updates 
> that
> are currently pending on a Proxmox VE 8 VM of mine (e.g., for ffmpeg)

yes, this is an (old) known issue that has also been reported on the Debian
side for ages - the security repo doesn't get the changelogs published like
the other ones; only when packages are folded into the next point release
are they available via apt. Sometimes it manages to pick it up locally
(from /usr/share/doc/$package/changelog.Debian.XX) *after* the upgrade,
but I haven't yet figured out what makes it do that only sometimes.


___
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel