When using a hyper-converged cluster it was previously possible to add
the pool used by the ceph-mgr modules (".mgr" since Quincy,
"device_health_metrics" previously) as an RBD storage. This would lead
to all kinds of errors when that storage was used (e.g. VMs missing
their disks after a migration). Hence, filter these pools from the list
of available pools.
Signed-off-by: Stefan Sterz <s.st...@proxmox.com>
---
Similar to the previous API change, this tries to fail gracefully if no
applications are defined for a pool.

 www/manager6/form/CephPoolSelector.js | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/www/manager6/form/CephPoolSelector.js b/www/manager6/form/CephPoolSelector.js
index 5b96398d..4dd77269 100644
--- a/www/manager6/form/CephPoolSelector.js
+++ b/www/manager6/form/CephPoolSelector.js
@@ -15,9 +15,17 @@ Ext.define('PVE.form.CephPoolSelector', {
             throw "no nodename given";
         }
 
+        let filterCephMgrPools = (item) => {
+            let apps = item.data?.applications;
+            return apps === undefined || apps?.rbd !== undefined;
+        };
+
         var store = Ext.create('Ext.data.Store', {
             fields: ['name'],
             sorters: 'name',
+            filters: [
+                filterCephMgrPools,
+            ],
             proxy: {
                 type: 'proxmox',
                 url: '/api2/json/nodes/' + me.nodename + '/ceph/pools',
@@ -32,8 +40,10 @@ Ext.define('PVE.form.CephPoolSelector', {
 
         store.load({
             callback: function(rec, op, success) {
-                if (success && rec.length > 0) {
-                    me.select(rec[0]);
+                let filteredRec = rec.filter(filterCephMgrPools);
+
+                if (success && filteredRec.length > 0) {
+                    me.select(filteredRec[0]);
                 }
             },
         });
-- 
2.30.2
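
Not part of the patch, just a minimal sketch of how the new
filterCephMgrPools predicate is expected to behave, assuming the pools
endpoint reports an "applications" map per pool (as introduced by the
previous API change). The sample records and pool names below are made
up for illustration:

// hypothetical records, roughly shaped like what the store hands to the filter
let samplePools = [
    { data: { name: '.mgr', applications: { mgr: {} } } },      // ceph-mgr pool -> dropped
    { data: { name: 'vm-pool', applications: { rbd: {} } } },   // rbd pool -> kept
    { data: { name: 'legacy-pool' } },                          // no applications reported -> kept (fail gracefully)
];

let filterCephMgrPools = (item) => {
    let apps = item.data?.applications;
    return apps === undefined || apps?.rbd !== undefined;
};

console.log(samplePools.filter(filterCephMgrPools).map((i) => i.data.name));
// -> [ 'vm-pool', 'legacy-pool' ]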