This is an automated email from the ASF dual-hosted git repository.

wilfreds pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git


The following commit(s) were added to refs/heads/master by this push:
     new 9f44327a [YUNIKORN-3132] Hardening placeholder pod security context 
settings (#1002)
9f44327a is described below

commit 9f44327a5c936263727f898197d79e67c8faf5ee
Author: Aditya Maheshwari <[email protected]>
AuthorDate: Fri Mar 6 09:27:44 2026 +1100

    [YUNIKORN-3132] Hardening placeholder pod security context settings (#1002)
    
    Add new configuration options to set default Security Context for
    placeholder pod. Option to override:
    * runAsUser
    * runAsGroup
    * fsGroup
    via service configuration, similar to the image itself.
    
    Default is no override, which uses the image defaults.
    
    Closes: #1002
    
    Signed-off-by: Wilfred Spiegelenburg <[email protected]>
---
 .gitignore                     |  1 +
 Makefile                       |  6 +++
 pkg/cache/placeholder.go       | 62 ++++++++++++++++++++++--------
 pkg/cache/placeholder_test.go  | 55 ++++++++++++++++++++++++++-
 pkg/conf/schedulerconf.go      | 86 ++++++++++++++++++++++++++++++------------
 pkg/conf/schedulerconf_test.go | 54 ++++++++++++++++++++++++--
 pkg/log/logger.go              | 55 ++++++++++++++-------------
 pkg/log/logger_test.go         |  2 +-
 8 files changed, 247 insertions(+), 74 deletions(-)

diff --git a/.gitignore b/.gitignore
index 46c3f4e2..2921ae91 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 .idea
+.vscode
 .DS_Store
 /tools/
 /build/
diff --git a/Makefile b/Makefile
index 5bc6d6a9..d1392cc3 100644
--- a/Makefile
+++ b/Makefile
@@ -318,6 +318,12 @@ $(GINKGO_BIN):
        @mkdir -p "$(GINKGO_PATH)"
        @GOBIN="$(BASE_DIR)/$(GINKGO_PATH)" "$(GO)" install 
"github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)"
 
+# Format the code
+.PHONY: format
+format:
+       @echo "running go fmt"
+       @"$(GO)" fmt ./...
+
 # Run lint against the previous commit for PR and branch build
 # In dev setup look at all changes on top of master
 .PHONY: lint
diff --git a/pkg/cache/placeholder.go b/pkg/cache/placeholder.go
index 6208aa23..12361a9c 100644
--- a/pkg/cache/placeholder.go
+++ b/pkg/cache/placeholder.go
@@ -22,24 +22,16 @@ import (
        "fmt"
        "strings"
 
+       "go.uber.org/zap"
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
        "github.com/apache/yunikorn-k8shim/pkg/common/constants"
        "github.com/apache/yunikorn-k8shim/pkg/common/utils"
        "github.com/apache/yunikorn-k8shim/pkg/conf"
+       "github.com/apache/yunikorn-k8shim/pkg/log"
 )
 
-// MUST: run the placeholder pod as non-root user
-// It doesn't matter which user we use to start the placeholders,
-// as long as it is not the root user. This is because the placeholder
-// is just a dummy container, that doesn't run anything.
-// On most of Linux distributions, uid bigger than 1000 is recommended
-// for normal user uses. So we are using 1000(uid)/3000(gid) here to
-// launch all the placeholder pods.
-var runAsUser int64 = 1000
-var runAsGroup int64 = 3000
-
 type Placeholder struct {
        appID         string
        taskGroupName string
@@ -47,6 +39,7 @@ type Placeholder struct {
 }
 
 func newPlaceholder(placeholderName string, app *Application, taskGroup 
TaskGroup) *Placeholder {
+       logger := log.Log(log.ShimPlaceHolderConfig)
        // Here the owner reference is always the originator pod
        ownerRefs := app.getPlaceholderOwnerReferences()
        annotations := utils.MergeMaps(taskGroup.Annotations, map[string]string{
@@ -85,7 +78,38 @@ func newPlaceholder(placeholderName string, app 
*Application, taskGroup TaskGrou
 
        // prepare the resource lists
        requests := GetPlaceholderResourceRequests(taskGroup.MinResource)
-       var zeroSeconds int64 = 0
+       // set default values for the placeholder pod
+       var zeroSeconds = int64(0)
+       var runAsNonRoot = true
+       var privileged = false
+       var allowPrivilegeEscalation = false
+       var readOnlyRootFilesystem = true
+       var hostNetwork = false
+       var placeHolderImage = constants.PlaceholderContainerImage
+       podSecContext := &v1.PodSecurityContext{
+               RunAsNonRoot: &runAsNonRoot,
+               SeccompProfile: &v1.SeccompProfile{
+                       Type: v1.SeccompProfileTypeRuntimeDefault,
+               },
+       }
+       schedulerConf := conf.GetSchedulerConf()
+       // override values if specified in the placeholder config
+       if schedulerConf.PlaceHolderConfig != nil {
+               logger.Info("Using placeholder config", zap.Any("config", 
schedulerConf.PlaceHolderConfig))
+               if schedulerConf.PlaceHolderConfig.Image != "" {
+                       placeHolderImage = schedulerConf.PlaceHolderConfig.Image
+               }
+               if schedulerConf.PlaceHolderConfig.RunAsUser != 0 {
+                       podSecContext.RunAsUser = 
&schedulerConf.PlaceHolderConfig.RunAsUser
+               }
+               if schedulerConf.PlaceHolderConfig.RunAsGroup != 0 {
+                       podSecContext.RunAsGroup = 
&schedulerConf.PlaceHolderConfig.RunAsGroup
+               }
+               if schedulerConf.PlaceHolderConfig.FSGroup != 0 {
+                       podSecContext.FSGroup = 
&schedulerConf.PlaceHolderConfig.FSGroup
+               }
+       }
+
        placeholderPod := &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                        Name:      placeholderName,
@@ -98,20 +122,26 @@ func newPlaceholder(placeholderName string, app 
*Application, taskGroup TaskGrou
                        OwnerReferences: ownerRefs,
                },
                Spec: v1.PodSpec{
-                       SecurityContext: &v1.PodSecurityContext{
-                               RunAsUser:  &runAsUser,
-                               RunAsGroup: &runAsGroup,
-                       },
+                       SecurityContext:  podSecContext,
+                       HostNetwork:      hostNetwork,
                        ImagePullSecrets: imagePullSecrets,
                        Containers: []v1.Container{
                                {
                                        Name:            
constants.PlaceholderContainerName,
-                                       Image:           
conf.GetSchedulerConf().PlaceHolderImage,
+                                       Image:           placeHolderImage,
                                        ImagePullPolicy: v1.PullIfNotPresent,
                                        Resources: v1.ResourceRequirements{
                                                Requests: requests,
                                                Limits:   requests,
                                        },
+                                       SecurityContext: &v1.SecurityContext{
+                                               Privileged: &privileged,
+                                               Capabilities: &v1.Capabilities{
+                                                       Drop: 
[]v1.Capability{"ALL"},
+                                               },
+                                               AllowPrivilegeEscalation: 
&allowPrivilegeEscalation,
+                                               ReadOnlyRootFilesystem:   
&readOnlyRootFilesystem,
+                                       },
                                },
                        },
                        RestartPolicy:                 
constants.PlaceholderPodRestartPolicy,
diff --git a/pkg/cache/placeholder_test.go b/pkg/cache/placeholder_test.go
index cbabd289..093f67fe 100644
--- a/pkg/cache/placeholder_test.go
+++ b/pkg/cache/placeholder_test.go
@@ -29,6 +29,7 @@ import (
 
        "github.com/apache/yunikorn-k8shim/pkg/common"
        "github.com/apache/yunikorn-k8shim/pkg/common/constants"
+       "github.com/apache/yunikorn-k8shim/pkg/conf"
        siCommon "github.com/apache/yunikorn-scheduler-interface/lib/go/common"
 )
 
@@ -147,8 +148,13 @@ func TestNewPlaceholder(t *testing.T) {
        assert.Equal(t, len(holder.pod.Spec.NodeSelector), 2, "unexpected 
number of node selectors")
        assert.Equal(t, len(holder.pod.Spec.Tolerations), 1, "unexpected number 
of tolerations")
        assert.Equal(t, holder.String(), "appID: app01, taskGroup: 
test-group-1, podName: test/ph-name")
-       assert.Equal(t, holder.pod.Spec.SecurityContext.RunAsUser, &runAsUser)
-       assert.Equal(t, holder.pod.Spec.SecurityContext.RunAsGroup, &runAsGroup)
+       assert.Equal(t, *holder.pod.Spec.SecurityContext.RunAsNonRoot, true)
+       assert.Equal(t, holder.pod.Spec.SecurityContext.SeccompProfile.Type, 
v1.SeccompProfileTypeRuntimeDefault)
+       assert.Equal(t, holder.pod.Spec.HostNetwork, false)
+       assert.Equal(t, 
*holder.pod.Spec.Containers[0].SecurityContext.Privileged, false)
+       assert.Equal(t, 
*holder.pod.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation, false)
+       assert.Equal(t, 
*holder.pod.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem, true)
+       assert.DeepEqual(t, 
holder.pod.Spec.Containers[0].SecurityContext.Capabilities.Drop, 
[]v1.Capability{"ALL"})
        assert.Equal(t, len(holder.pod.Spec.ImagePullSecrets), 2, "unexpected 
number of pull secrets")
        assert.Equal(t, "secret1", holder.pod.Spec.ImagePullSecrets[0].Name)
        assert.Equal(t, "secret2", holder.pod.Spec.ImagePullSecrets[1].Name)
@@ -297,3 +303,48 @@ func TestNewPlaceholderWithTopologySpreadConstraints(t 
*testing.T) {
                "labelKey1": "labelKeyValue1",
        })
 }
+
+func TestNewPlaceholderWithPlaceHolderConfig(t *testing.T) {
+       // Setup
+       mockedSchedulerAPI := newMockSchedulerAPI()
+       app := NewApplication(appID, queue, "bob",
+               testGroups, map[string]string{constants.AppTagNamespace: 
namespace}, mockedSchedulerAPI)
+       app.setTaskGroups(taskGroups)
+
+       // Update conf
+       originalConf := conf.GetSchedulerConf()
+       defer conf.SetSchedulerConf(originalConf)
+
+       newConf := originalConf.Clone()
+       runAsUser := int64(1000)
+       runAsGroup := int64(3000)
+       fsGroup := int64(2000)
+       newConf.PlaceHolderConfig = &conf.PlaceHolderConfig{
+               Image:      "new-image",
+               RunAsUser:  runAsUser,
+               RunAsGroup: runAsGroup,
+               FSGroup:    fsGroup,
+       }
+       conf.SetSchedulerConf(newConf)
+
+       // Execute
+       holder := newPlaceholder("ph-name", app, app.taskGroups[0])
+
+       // Verify
+       assert.Equal(t, holder.pod.Spec.Containers[0].Image, "new-image")
+       assert.Equal(t, *holder.pod.Spec.SecurityContext.RunAsUser, int64(1000))
+       assert.Equal(t, *holder.pod.Spec.SecurityContext.RunAsGroup, 
int64(3000))
+       assert.Equal(t, *holder.pod.Spec.SecurityContext.FSGroup, int64(2000))
+
+       // Test fallback
+       newConf2 := originalConf.Clone()
+       newConf2.PlaceHolderConfig = nil
+       conf.SetSchedulerConf(newConf2)
+
+       holder2 := newPlaceholder("ph-name-2", app, app.taskGroups[0])
+       assert.Equal(t, holder2.pod.Spec.Containers[0].Image, 
constants.PlaceholderContainerImage)
+       // RunAsUser etc should be nil/default
+       assert.Assert(t, holder2.pod.Spec.SecurityContext.RunAsUser == nil)
+       assert.Assert(t, holder2.pod.Spec.SecurityContext.RunAsGroup == nil)
+       assert.Assert(t, holder2.pod.Spec.SecurityContext.FSGroup == nil)
+}
diff --git a/pkg/conf/schedulerconf.go b/pkg/conf/schedulerconf.go
index f8add14e..7a98de7d 100644
--- a/pkg/conf/schedulerconf.go
+++ b/pkg/conf/schedulerconf.go
@@ -67,6 +67,9 @@ const (
        CMSvcDisableGangScheduling        = PrefixService + 
"disableGangScheduling"
        CMSvcEnableConfigHotRefresh       = PrefixService + 
"enableConfigHotRefresh"
        CMSvcPlaceholderImage             = PrefixService + "placeholderImage"
+       CMSvcPlaceholderRunAsUser         = PrefixService + 
"placeholderRunAsUser"
+       CMSvcPlaceholderRunAsGroup        = PrefixService + 
"placeholderRunAsGroup"
+       CMSvcPlaceholderFSGroup           = PrefixService + "placeholderFsGroup"
        CMSvcNodeInstanceTypeNodeLabelKey = PrefixService + 
"nodeInstanceTypeNodeLabelKey"
 
        // kubernetes
@@ -110,28 +113,35 @@ var confHolder atomic.Value
 var kubeLoggerOnce sync.Once
 
 type SchedulerConf struct {
-       SchedulerName            string        `json:"schedulerName"`
-       ClusterID                string        `json:"clusterId"`
-       ClusterVersion           string        `json:"clusterVersion"`
-       PolicyGroup              string        `json:"policyGroup"`
-       Interval                 time.Duration `json:"schedulingIntervalSecond"`
-       KubeConfig               string        
`json:"absoluteKubeConfigFilePath"`
-       VolumeBindTimeout        time.Duration `json:"volumeBindTimeout"`
-       EventChannelCapacity     int           `json:"eventChannelCapacity"`
-       DispatchTimeout          time.Duration `json:"dispatchTimeout"`
-       KubeQPS                  int           `json:"kubeQPS"`
-       KubeBurst                int           `json:"kubeBurst"`
-       EnableConfigHotRefresh   bool          `json:"enableConfigHotRefresh"`
-       DisableGangScheduling    bool          `json:"disableGangScheduling"`
-       UserLabelKey             string        `json:"userLabelKey"`
-       PlaceHolderImage         string        `json:"placeHolderImage"`
-       InstanceTypeNodeLabelKey string        `json:"instanceTypeNodeLabelKey"`
-       Namespace                string        `json:"namespace"`
-       GenerateUniqueAppIds     bool          `json:"generateUniqueAppIds"`
+       SchedulerName            string             `json:"schedulerName"`
+       ClusterID                string             `json:"clusterId"`
+       ClusterVersion           string             `json:"clusterVersion"`
+       PolicyGroup              string             `json:"policyGroup"`
+       Interval                 time.Duration      
`json:"schedulingIntervalSecond"`
+       KubeConfig               string             
`json:"absoluteKubeConfigFilePath"`
+       VolumeBindTimeout        time.Duration      `json:"volumeBindTimeout"`
+       EventChannelCapacity     int                
`json:"eventChannelCapacity"`
+       DispatchTimeout          time.Duration      `json:"dispatchTimeout"`
+       KubeQPS                  int                `json:"kubeQPS"`
+       KubeBurst                int                `json:"kubeBurst"`
+       EnableConfigHotRefresh   bool               
`json:"enableConfigHotRefresh"`
+       DisableGangScheduling    bool               
`json:"disableGangScheduling"`
+       UserLabelKey             string             `json:"userLabelKey"`
+       PlaceHolderConfig        *PlaceHolderConfig `json:"placeHolderConfig"`
+       InstanceTypeNodeLabelKey string             
`json:"instanceTypeNodeLabelKey"`
+       Namespace                string             `json:"namespace"`
+       GenerateUniqueAppIds     bool               
`json:"generateUniqueAppIds"`
 
        locking.RWMutex
 }
 
+type PlaceHolderConfig struct {
+       Image      string `json:"image"`
+       RunAsUser  int64  `json:"runAsUser,omitempty"`
+       RunAsGroup int64  `json:"runAsGroup,omitempty"`
+       FSGroup    int64  `json:"fsGroup,omitempty"`
+}
+
 func (conf *SchedulerConf) Clone() *SchedulerConf {
        conf.RLock()
        defer conf.RUnlock()
@@ -151,7 +161,7 @@ func (conf *SchedulerConf) Clone() *SchedulerConf {
                EnableConfigHotRefresh:   conf.EnableConfigHotRefresh,
                DisableGangScheduling:    conf.DisableGangScheduling,
                UserLabelKey:             conf.UserLabelKey,
-               PlaceHolderImage:         conf.PlaceHolderImage,
+               PlaceHolderConfig:        conf.PlaceHolderConfig,
                InstanceTypeNodeLabelKey: conf.InstanceTypeNodeLabelKey,
                Namespace:                conf.Namespace,
                GenerateUniqueAppIds:     conf.GenerateUniqueAppIds,
@@ -209,9 +219,12 @@ func handleNonReloadableConfig(old *SchedulerConf, new 
*SchedulerConf) {
        checkNonReloadableInt(CMKubeQPS, &old.KubeQPS, &new.KubeQPS)
        checkNonReloadableInt(CMKubeBurst, &old.KubeBurst, &new.KubeBurst)
        checkNonReloadableBool(CMSvcDisableGangScheduling, 
&old.DisableGangScheduling, &new.DisableGangScheduling)
-       checkNonReloadableString(CMSvcPlaceholderImage, &old.PlaceHolderImage, 
&new.PlaceHolderImage)
        checkNonReloadableString(CMSvcNodeInstanceTypeNodeLabelKey, 
&old.InstanceTypeNodeLabelKey, &new.InstanceTypeNodeLabelKey)
        checkNonReloadableBool(AMFilteringGenerateUniqueAppIds, 
&old.GenerateUniqueAppIds, &new.GenerateUniqueAppIds)
+       checkNonReloadableInt64(CMSvcPlaceholderRunAsUser, 
&old.PlaceHolderConfig.RunAsUser, &new.PlaceHolderConfig.RunAsUser)
+       checkNonReloadableInt64(CMSvcPlaceholderRunAsGroup, 
&old.PlaceHolderConfig.RunAsGroup, &new.PlaceHolderConfig.RunAsGroup)
+       checkNonReloadableInt64(CMSvcPlaceholderFSGroup, 
&old.PlaceHolderConfig.FSGroup, &new.PlaceHolderConfig.FSGroup)
+       checkNonReloadableString(CMSvcPlaceholderImage, 
&old.PlaceHolderConfig.Image, &new.PlaceHolderConfig.Image)
 }
 
 const warningNonReloadable = "ignoring non-reloadable configuration change 
(restart required to update)"
@@ -237,6 +250,13 @@ func checkNonReloadableInt(name string, old *int, new 
*int) {
        }
 }
 
+func checkNonReloadableInt64(name string, old *int64, new *int64) {
+       if *old != *new {
+               log.Log(log.ShimConfig).Warn(warningNonReloadable, 
zap.String("config", name), zap.Int64("existing", *old), zap.Int64("new", *new))
+               *new = *old
+       }
+}
+
 func checkNonReloadableBool(name string, old *bool, new *bool) {
        if *old != *new {
                log.Log(log.ShimConfig).Warn(warningNonReloadable, 
zap.String("config", name), zap.Bool("existing", *old), zap.Bool("new", *new))
@@ -314,9 +334,11 @@ func CreateDefaultConfig() *SchedulerConf {
                EnableConfigHotRefresh:   DefaultEnableConfigHotRefresh,
                DisableGangScheduling:    DefaultDisableGangScheduling,
                UserLabelKey:             constants.DefaultUserLabel,
-               PlaceHolderImage:         constants.PlaceholderContainerImage,
                InstanceTypeNodeLabelKey: 
constants.DefaultNodeInstanceTypeNodeLabelKey,
                GenerateUniqueAppIds:     
DefaultAMFilteringGenerateUniqueAppIds,
+               PlaceHolderConfig: &PlaceHolderConfig{
+                       Image: constants.PlaceholderContainerImage,
+               },
        }
 }
 
@@ -339,7 +361,11 @@ func parseConfig(config map[string]string, prev 
*SchedulerConf) (*SchedulerConf,
        parser.durationVar(&conf.DispatchTimeout, CMSvcDispatchTimeout)
        parser.boolVar(&conf.DisableGangScheduling, CMSvcDisableGangScheduling)
        parser.boolVar(&conf.EnableConfigHotRefresh, 
CMSvcEnableConfigHotRefresh)
-       parser.stringVar(&conf.PlaceHolderImage, CMSvcPlaceholderImage)
+       parser.stringVar(&conf.PlaceHolderConfig.Image, CMSvcPlaceholderImage)
+       parser.int64Var(&conf.PlaceHolderConfig.RunAsUser, 
CMSvcPlaceholderRunAsUser)
+       parser.int64Var(&conf.PlaceHolderConfig.RunAsGroup, 
CMSvcPlaceholderRunAsGroup)
+       parser.int64Var(&conf.PlaceHolderConfig.FSGroup, 
CMSvcPlaceholderFSGroup)
+
        parser.stringVar(&conf.InstanceTypeNodeLabelKey, 
CMSvcNodeInstanceTypeNodeLabelKey)
 
        // kubernetes
@@ -375,8 +401,8 @@ func (cp *configParser) stringVar(p *string, name string) {
 
 func (cp *configParser) intVar(p *int, name string) {
        if newValue, ok := cp.config[name]; ok {
-               int64Value, err := strconv.ParseInt(newValue, 10, 32)
-               intValue := int(int64Value)
+               int32Value, err := strconv.ParseInt(newValue, 10, 32)
+               intValue := int(int32Value)
                if err != nil {
                        log.Log(log.ShimConfig).Error("Unable to parse 
configmap entry", zap.String("key", name), zap.String("value", newValue), 
zap.Error(err))
                        cp.errors = append(cp.errors, err)
@@ -410,6 +436,18 @@ func (cp *configParser) durationVar(p *time.Duration, name 
string) {
        }
 }
 
+func (cp *configParser) int64Var(p *int64, name string) {
+       if newValue, ok := cp.config[name]; ok {
+               int64Value, err := strconv.ParseInt(newValue, 10, 64)
+               if err != nil {
+                       log.Log(log.ShimConfig).Error("Unable to parse 
configmap entry", zap.String("key", name), zap.String("value", newValue), 
zap.Error(err))
+                       cp.errors = append(cp.errors, err)
+                       return
+               }
+               *p = int64Value
+       }
+}
+
 func updateKubeLogger() {
        // if log level is debug, enable klog and set its log level verbosity 
to 4 (represents debug level),
        // For details refer to the Logging Conventions of klog at
diff --git a/pkg/conf/schedulerconf_test.go b/pkg/conf/schedulerconf_test.go
index ef16dff3..cf56c898 100644
--- a/pkg/conf/schedulerconf_test.go
+++ b/pkg/conf/schedulerconf_test.go
@@ -100,6 +100,23 @@ func TestDecompressUnknownKey(t *testing.T) {
        assert.Assert(t, len(decodedConfigString) == 0, "expected 
decodedConfigString to be nil")
 }
 
+func TestPlaceholderConfigParsing(t *testing.T) {
+       err := UpdateConfigMaps([]*v1.ConfigMap{
+               {Data: map[string]string{
+                       CMSvcPlaceholderImage:      "new-image",
+                       CMSvcPlaceholderRunAsUser:  "1001",
+                       CMSvcPlaceholderRunAsGroup: "1002",
+                       CMSvcPlaceholderFSGroup:    "1003",
+               }},
+       }, true)
+       assert.NilError(t, err, "UpdateConfigMap failed")
+       conf := GetSchedulerConf()
+       assert.Equal(t, conf.PlaceHolderConfig.Image, "new-image")
+       assert.Equal(t, conf.PlaceHolderConfig.RunAsUser, int64(1001))
+       assert.Equal(t, conf.PlaceHolderConfig.RunAsGroup, int64(1002))
+       assert.Equal(t, conf.PlaceHolderConfig.FSGroup, int64(1003))
+}
+
 func TestDecompressBadCompression(t *testing.T) {
        encodedConfigString := make([]byte, 
base64.StdEncoding.EncodedLen(len([]byte(configs.DefaultSchedulerConfig))))
        base64.StdEncoding.Encode(encodedConfigString, 
[]byte(configs.DefaultSchedulerConfig))
@@ -122,7 +139,10 @@ func TestParseConfigMap(t *testing.T) {
                {CMSvcDispatchTimeout, "DispatchTimeout", 3 * time.Minute},
                {CMSvcDisableGangScheduling, "DisableGangScheduling", true},
                {CMSvcEnableConfigHotRefresh, "EnableConfigHotRefresh", false},
-               {CMSvcPlaceholderImage, "PlaceHolderImage", "test-image"},
+               {CMSvcPlaceholderImage, "PlaceHolderConfig.Image", 
"test-image"},
+               {CMSvcPlaceholderRunAsUser, "PlaceHolderConfig.RunAsUser", 
int64(1001)},
+               {CMSvcPlaceholderRunAsGroup, "PlaceHolderConfig.RunAsGroup", 
int64(1002)},
+               {CMSvcPlaceholderFSGroup, "PlaceHolderConfig.FSGroup", 
int64(1003)},
                {CMSvcNodeInstanceTypeNodeLabelKey, "InstanceTypeNodeLabelKey", 
"node.kubernetes.io/instance-type"},
                {CMKubeQPS, "KubeQPS", 2345},
                {CMKubeBurst, "KubeBurst", 3456},
@@ -153,8 +173,11 @@ func TestUpdateConfigMapNonReloadable(t *testing.T) {
                {CMSvcEventChannelCapacity, "EventChannelCapacity", 1234, 
false},
                {CMSvcDispatchTimeout, "DispatchTimeout", 3 * time.Minute, 
false},
                {CMSvcDisableGangScheduling, "DisableGangScheduling", true, 
false},
-               {CMSvcPlaceholderImage, "PlaceHolderImage", "test-image", 
false},
                {CMSvcNodeInstanceTypeNodeLabelKey, "InstanceTypeNodeLabelKey", 
"node.kubernetes.io/instance-type", false},
+               {CMSvcPlaceholderImage, "PlaceHolderConfig.Image", 
"test-image", false},
+               {CMSvcPlaceholderRunAsUser, "PlaceHolderConfig.RunAsUser", 
int64(1001), false},
+               {CMSvcPlaceholderRunAsGroup, "PlaceHolderConfig.RunAsGroup", 
int64(1002), false},
+               {CMSvcPlaceholderFSGroup, "PlaceHolderConfig.FSGroup", 
int64(1003), false},
                {CMKubeQPS, "KubeQPS", 2345, false},
                {CMKubeBurst, "KubeBurst", 3456, false},
        }
@@ -213,9 +236,32 @@ func TestParseConfigMapWithInvalidDuration(t *testing.T) {
        assert.ErrorContains(t, errs[0], "invalid duration", "wrong error type")
 }
 
+func TestParseConfigMapWithInvalidInt64(t *testing.T) {
+       prev := CreateDefaultConfig()
+       conf, errs := parseConfig(map[string]string{CMSvcPlaceholderRunAsUser: 
"x"}, prev)
+       assert.Assert(t, conf == nil, "Conf parsing failed")
+       assert.Equal(t, 1, len(errs), "1 Error for parsing invalid runAsUser")
+       assert.ErrorContains(t, errs[0], "invalid syntax", "wrong error type")
+}
+
 // get a configuration value by field name
 func getConfValue(t *testing.T, conf *SchedulerConf, name string) interface{} {
-       val := reflect.ValueOf(conf).Elem().FieldByName(name)
-       assert.Assert(t, val.IsValid(), "Field not valid: "+name)
+       // Split by "." to handle nested fields
+       parts := strings.Split(name, ".")
+       val := reflect.ValueOf(conf).Elem()
+
+       for _, part := range parts {
+               val = val.FieldByName(part)
+               assert.Assert(t, val.IsValid(), fmt.Sprintf("Field not valid: 
%s", name))
+
+               // If it's a pointer, dereference it
+               if val.Kind() == reflect.Ptr {
+                       if val.IsNil() {
+                               return nil
+                       }
+                       val = val.Elem()
+               }
+       }
+
        return val.Interface()
 }
diff --git a/pkg/log/logger.go b/pkg/log/logger.go
index f3ee845c..1ac6291d 100644
--- a/pkg/log/logger.go
+++ b/pkg/log/logger.go
@@ -53,32 +53,33 @@ const (
 
 // Defined loggers: when adding new loggers, ids must be sequential, and all 
must be added to the loggers slice in the same order
 var (
-       Shim                 = &LoggerHandle{id: 0, name: "shim"}
-       Kubernetes           = &LoggerHandle{id: 1, name: "kubernetes"}
-       Test                 = &LoggerHandle{id: 2, name: "test"}
-       Admission            = &LoggerHandle{id: 3, name: "admission"}
-       AdmissionClient      = &LoggerHandle{id: 4, name: "admission.client"}
-       AdmissionConf        = &LoggerHandle{id: 5, name: "admission.conf"}
-       AdmissionWebhook     = &LoggerHandle{id: 6, name: "admission.webhook"}
-       AdmissionUtils       = &LoggerHandle{id: 7, name: "admission.utils"}
-       ShimContext          = &LoggerHandle{id: 8, name: "shim.context"}
-       ShimFSM              = &LoggerHandle{id: 9, name: "shim.fsm"}
-       ShimCacheApplication = &LoggerHandle{id: 10, name: 
"shim.cache.application"}
-       ShimCacheAppMgmt     = &LoggerHandle{id: 11, name: "shim.cache.appmgmt"}
-       ShimCacheNode        = &LoggerHandle{id: 12, name: "shim.cache.node"}
-       ShimCacheTask        = &LoggerHandle{id: 13, name: "shim.cache.task"}
-       ShimCacheExternal    = &LoggerHandle{id: 14, name: 
"shim.cache.external"}
-       ShimCachePlaceholder = &LoggerHandle{id: 15, name: 
"shim.cache.placeholder"}
-       ShimRMCallback       = &LoggerHandle{id: 16, name: "shim.rmcallback"}
-       ShimClient           = &LoggerHandle{id: 17, name: "shim.client"}
-       ShimResources        = &LoggerHandle{id: 18, name: "shim.resources"}
-       ShimUtils            = &LoggerHandle{id: 19, name: "shim.utils"}
-       ShimConfig           = &LoggerHandle{id: 20, name: "shim.config"}
-       ShimDispatcher       = &LoggerHandle{id: 21, name: "shim.dispatcher"}
-       ShimScheduler        = &LoggerHandle{id: 22, name: "shim.scheduler"}
-       ShimSchedulerPlugin  = &LoggerHandle{id: 23, name: 
"shim.scheduler.plugin"}
-       ShimPredicates       = &LoggerHandle{id: 24, name: "shim.predicates"}
-       ShimFramework        = &LoggerHandle{id: 25, name: "shim.framework"}
+       Shim                  = &LoggerHandle{id: 0, name: "shim"}
+       Kubernetes            = &LoggerHandle{id: 1, name: "kubernetes"}
+       Test                  = &LoggerHandle{id: 2, name: "test"}
+       Admission             = &LoggerHandle{id: 3, name: "admission"}
+       AdmissionClient       = &LoggerHandle{id: 4, name: "admission.client"}
+       AdmissionConf         = &LoggerHandle{id: 5, name: "admission.conf"}
+       AdmissionWebhook      = &LoggerHandle{id: 6, name: "admission.webhook"}
+       AdmissionUtils        = &LoggerHandle{id: 7, name: "admission.utils"}
+       ShimContext           = &LoggerHandle{id: 8, name: "shim.context"}
+       ShimFSM               = &LoggerHandle{id: 9, name: "shim.fsm"}
+       ShimCacheApplication  = &LoggerHandle{id: 10, name: 
"shim.cache.application"}
+       ShimCacheAppMgmt      = &LoggerHandle{id: 11, name: 
"shim.cache.appmgmt"}
+       ShimCacheNode         = &LoggerHandle{id: 12, name: "shim.cache.node"}
+       ShimCacheTask         = &LoggerHandle{id: 13, name: "shim.cache.task"}
+       ShimCacheExternal     = &LoggerHandle{id: 14, name: 
"shim.cache.external"}
+       ShimCachePlaceholder  = &LoggerHandle{id: 15, name: 
"shim.cache.placeholder"}
+       ShimRMCallback        = &LoggerHandle{id: 16, name: "shim.rmcallback"}
+       ShimClient            = &LoggerHandle{id: 17, name: "shim.client"}
+       ShimResources         = &LoggerHandle{id: 18, name: "shim.resources"}
+       ShimUtils             = &LoggerHandle{id: 19, name: "shim.utils"}
+       ShimConfig            = &LoggerHandle{id: 20, name: "shim.config"}
+       ShimDispatcher        = &LoggerHandle{id: 21, name: "shim.dispatcher"}
+       ShimScheduler         = &LoggerHandle{id: 22, name: "shim.scheduler"}
+       ShimSchedulerPlugin   = &LoggerHandle{id: 23, name: 
"shim.scheduler.plugin"}
+       ShimPredicates        = &LoggerHandle{id: 24, name: "shim.predicates"}
+       ShimFramework         = &LoggerHandle{id: 25, name: "shim.framework"}
+       ShimPlaceHolderConfig = &LoggerHandle{id: 26, name: 
"shim.placeholder.config"}
 )
 
 // this tracks all the known logger handles, used to preallocate the real 
logger instances when configuration changes
@@ -87,7 +88,7 @@ var loggers = []*LoggerHandle{
        Admission, AdmissionClient, AdmissionConf, AdmissionWebhook, 
AdmissionUtils, ShimContext, ShimFSM,
        ShimCacheApplication, ShimCacheAppMgmt, ShimCacheNode, ShimCacheTask, 
ShimCacheExternal, ShimCachePlaceholder,
        ShimRMCallback, ShimClient, ShimResources, ShimUtils, ShimConfig, 
ShimDispatcher,
-       ShimScheduler, ShimSchedulerPlugin, ShimPredicates, ShimFramework,
+       ShimScheduler, ShimSchedulerPlugin, ShimPredicates, ShimFramework, 
ShimPlaceHolderConfig,
 }
 
 // structure to hold all current logger configuration state
diff --git a/pkg/log/logger_test.go b/pkg/log/logger_test.go
index 01a50828..e373d7ca 100644
--- a/pkg/log/logger_test.go
+++ b/pkg/log/logger_test.go
@@ -38,7 +38,7 @@ func TestLoggerIds(t *testing.T) {
        _ = Log(Test)
 
        // validate logger count
-       assert.Equal(t, 26, len(loggers), "wrong logger count")
+       assert.Equal(t, 27, len(loggers), "wrong logger count")
 
        // validate that all loggers are populated and have sequential ids
        for i := 0; i < len(loggers); i++ {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to