hubcio commented on code in PR #3019:
URL: https://github.com/apache/iggy/pull/3019#discussion_r2987288336


##########
helm/charts/iggy/templates/hpa.yaml:
##########
@@ -38,24 +38,24 @@ spec:
     - type: Resource
       resource:
         name: cpu
-        {{- if .Capabilities.APIVersions.Has "autoscaling/v2" }}
+        {{ if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }}

Review Comment:
   Switching from `{{-` to `{{` on the if/else/end blocks here introduces blank
lines in the rendered output:
   
   ```yaml
   metrics:
     - type: Resource
       resource:
         name: cpu
         
         target:
           type: Utilization
           averageUtilization: 80
         
   ```
   
   The blank lines between `name: cpu` and `target:` (and the same for memory
below) appear because the template directives no longer trim whitespace. It is
valid YAML and Kubernetes accepts it, but it's sloppy. You should use `{{-` trim
markers as the original had. The same issue occurs at lines 45, 47, 53, 57, 59.



##########
scripts/ci/test_helm.sh:
##########
@@ -0,0 +1,418 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+set -euo pipefail
+
+usage() {
+  cat <<'EOF'
+Usage: scripts/ci/test_helm.sh 
<validate|smoke|cleanup-smoke|collect-smoke-diagnostics>
+
+Commands:
+  validate                  Run Helm lint and render validation scenarios.
+  smoke                     Run the Helm runtime smoke scenario against the 
current Kubernetes context.
+  cleanup-smoke             Remove the Helm smoke release namespace and any 
failed-install leftovers.
+  collect-smoke-diagnostics Collect diagnostics for the Helm smoke namespace.
+
+Notes:
+  - validate requires helm.
+  - smoke requires helm, kubectl, curl, and python3, plus an existing cluster 
and ingress controller.
+  - cleanup-smoke requires kubectl and optionally helm.
+  - collect-smoke-diagnostics is best-effort and does not fail on missing 
resources.
+EOF
+}
+
+REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
+cd "$REPO_ROOT"
+
+CHART_DIR="${CHART_DIR:-helm/charts/iggy}"
+HELM_RENDER_DIR="${HELM_RENDER_DIR:-/tmp/helm-render}"
+HELM_SMOKE_NAMESPACE="${HELM_SMOKE_NAMESPACE:-iggy-smoke}"
+HELM_SMOKE_RELEASE="${HELM_SMOKE_RELEASE:-iggy-smoke}"
+HELM_SMOKE_REPORT_DIR="${HELM_SMOKE_REPORT_DIR:-reports/helm-smoke}"
+HELM_SMOKE_SERVER_HOST="${HELM_SMOKE_SERVER_HOST:-server.iggy.local}"
+HELM_SMOKE_UI_HOST="${HELM_SMOKE_UI_HOST:-ui.iggy.local}"
+HELM_SMOKE_TIMEOUT="${HELM_SMOKE_TIMEOUT:-5m}"
+HELM_SMOKE_INGRESS_CLASS="${HELM_SMOKE_INGRESS_CLASS:-nginx}"
+HELM_SMOKE_KIND_NAME="${HELM_SMOKE_KIND_NAME:-iggy-helm-smoke}"
+HELM_SMOKE_SERVER_CPU_ALLOCATION="${HELM_SMOKE_SERVER_CPU_ALLOCATION:-1}"
+
+require_command() {
+  if ! command -v "$1" >/dev/null 2>&1; then
+    echo "Error: required command '$1' not found" >&2
+    exit 1
+  fi
+}
+
+extract_chart_field() {
+  local field="$1"
+  local value
+
+  value="$(
+    awk -v field="$field" '
+      $1 == field ":" {
+        gsub(/"/, "", $2)
+        print $2
+        exit
+      }
+    ' "$CHART_DIR/Chart.yaml"
+  )"
+
+  if [ -z "$value" ]; then
+    echo "Error: could not extract '$field' from $CHART_DIR/Chart.yaml" >&2
+    exit 1
+  fi
+
+  printf '%s\n' "$value"
+}
+
+extract_values_tag() {
+  local section="$1"
+  local value
+
+  value="$(
+    awk -v section="$section" '
+      $1 == section ":" {
+        in_section = 1
+        next
+      }
+      in_section && /^[^[:space:]]/ {
+        in_section = 0
+      }
+      in_section && $1 == "tag:" {
+        gsub(/"/, "", $2)
+        print $2
+        exit
+      }
+    ' "$CHART_DIR/values.yaml"
+  )"
+
+  if [ -z "$value" ]; then
+    echo "Error: could not extract '$section.image.tag' from 
$CHART_DIR/values.yaml" >&2
+    exit 1
+  fi
+
+  printf '%s\n' "$value"
+}
+
+prepare_render_dir() {
+  if [ -z "$HELM_RENDER_DIR" ] || [ "$HELM_RENDER_DIR" = "/" ]; then
+    echo "Error: HELM_RENDER_DIR must not be empty or /" >&2
+    exit 1
+  fi
+
+  rm -rf "$HELM_RENDER_DIR"
+  mkdir -p "$HELM_RENDER_DIR"
+}
+
+extract_kind_names() {
+  local file="$1"
+  local kind="$2"
+
+  awk -v kind="$kind" '
+    /^kind: / {
+      current_kind = $2
+      in_metadata = 0
+      next
+    }
+    /^metadata:$/ {
+      in_metadata = 1
+      next
+    }
+    in_metadata && /^  name: / {
+      if (current_kind == kind) {
+        print $2
+      }
+      in_metadata = 0
+    }
+  ' "$file"
+}
+
+validate() {
+  require_command helm
+
+  local chart_version
+  local chart_app_version
+  local server_image_tag
+  local ui_image_tag
+
+  chart_version="$(extract_chart_field version)"
+  chart_app_version="$(extract_chart_field appVersion)"
+  server_image_tag="$(extract_values_tag server)"
+  ui_image_tag="$(extract_values_tag ui)"
+
+  prepare_render_dir
+
+  helm lint --strict "$CHART_DIR"
+
+  helm template iggy "$CHART_DIR" > "$HELM_RENDER_DIR/default.yaml"
+  test "$(grep -c '^kind: Deployment$' "$HELM_RENDER_DIR/default.yaml")" -eq 2
+  test "$(grep -c '^kind: Service$' "$HELM_RENDER_DIR/default.yaml")" -eq 2
+  test "$(grep -c '^kind: ServiceAccount$' "$HELM_RENDER_DIR/default.yaml")" 
-eq 1
+  test "$(grep -c '^kind: Secret$' "$HELM_RENDER_DIR/default.yaml")" -eq 1
+  grep -q "helm.sh/chart: iggy-${chart_version}" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "helm.sh/chart: iggy-ui-${chart_version}" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "app.kubernetes.io/version: \"${chart_app_version}\"" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "image: \"apache/iggy:${server_image_tag}\"" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "image: \"apache/iggy-web-ui:${ui_image_tag}\"" 
"$HELM_RENDER_DIR/default.yaml"
+
+  helm template iggy "$CHART_DIR" \
+    --set server.persistence.enabled=true \
+    --set autoscaling.enabled=true \
+    --set autoscaling.targetCPUUtilizationPercentage=80 \
+    --set server.ingress.enabled=true \
+    --set ui.ingress.enabled=true \
+    --set server.serviceMonitor.enabled=true \
+    > "$HELM_RENDER_DIR/all-features.yaml"
+  grep -q '^kind: PersistentVolumeClaim$' "$HELM_RENDER_DIR/all-features.yaml"
+  grep -q '^kind: HorizontalPodAutoscaler$' 
"$HELM_RENDER_DIR/all-features.yaml"
+  test "$(grep -c '^kind: Ingress$' "$HELM_RENDER_DIR/all-features.yaml")" -eq 
2
+  test "$(extract_kind_names "$HELM_RENDER_DIR/all-features.yaml" Ingress | 
sort -u | wc -l | tr -d ' ')" -eq 2
+  extract_kind_names "$HELM_RENDER_DIR/all-features.yaml" Ingress | grep -qx 
'iggy'
+  extract_kind_names "$HELM_RENDER_DIR/all-features.yaml" Ingress | grep -qx 
'iggy-ui'
+  grep -q '^kind: ServiceMonitor$' "$HELM_RENDER_DIR/all-features.yaml"
+
+  helm template iggy "$CHART_DIR" \
+    --kube-version 1.18.0 \
+    --api-versions networking.k8s.io/v1beta1 \
+    --api-versions autoscaling/v2beta2 \
+    --set autoscaling.enabled=true \
+    --set autoscaling.targetCPUUtilizationPercentage=80 \
+    --set server.ingress.enabled=true \
+    --set ui.ingress.enabled=true \
+    > "$HELM_RENDER_DIR/legacy-k8s-1.18.yaml"
+  test "$(grep -c '^apiVersion: networking.k8s.io/v1beta1$' 
"$HELM_RENDER_DIR/legacy-k8s-1.18.yaml")" -eq 2
+  grep -q '^apiVersion: autoscaling/v2beta2$' 
"$HELM_RENDER_DIR/legacy-k8s-1.18.yaml"
+
+  helm template iggy "$CHART_DIR" --set ui.enabled=false > 
"$HELM_RENDER_DIR/server-only.yaml"
+  test "$(grep -c '^kind: Deployment$' "$HELM_RENDER_DIR/server-only.yaml")" 
-eq 1
+  test "$(grep -c '^kind: Service$' "$HELM_RENDER_DIR/server-only.yaml")" -eq 1
+
+  helm template iggy "$CHART_DIR" --set server.enabled=false > 
"$HELM_RENDER_DIR/ui-only.yaml"
+  test "$(grep -c '^kind: Deployment$' "$HELM_RENDER_DIR/ui-only.yaml")" -eq 1
+  test "$(grep -c '^kind: Service$' "$HELM_RENDER_DIR/ui-only.yaml")" -eq 1
+
+  helm template iggy "$CHART_DIR" \
+    --set server.users.root.createSecret=false \
+    --set server.users.root.existingSecret.name=supersecret \
+    > "$HELM_RENDER_DIR/existing-secret.yaml"
+  if grep -q 'root-credentials' "$HELM_RENDER_DIR/existing-secret.yaml"; then
+    echo "Error: existing-secret render should not include generated root 
credentials" >&2
+    exit 1
+  fi
+  grep -q 'name: supersecret' "$HELM_RENDER_DIR/existing-secret.yaml"
+}
+
+smoke() {
+  require_command helm
+  require_command kubectl
+  require_command curl
+  require_command python3
+
+  local ui_image_tag
+  local server_ping_status
+  local ui_healthz_status
+  local leftover_resources
+  local helm_status
+  local smoke_values_file
+
+  mkdir -p "$HELM_SMOKE_REPORT_DIR"
+
+  if ! helm status "$HELM_SMOKE_RELEASE" -n "$HELM_SMOKE_NAMESPACE" >/dev/null 
2>&1; then
+    leftover_resources="$(
+      kubectl -n "$HELM_SMOKE_NAMESPACE" get 
deployment,service,ingress,secret,serviceaccount \
+        -l "app.kubernetes.io/instance=${HELM_SMOKE_RELEASE}" \
+        -o name 2>/dev/null || true
+    )"
+    if [ -n "$leftover_resources" ]; then
+      cat >&2 <<EOF
+Found leftover resources for a failed Helm smoke install in namespace 
'${HELM_SMOKE_NAMESPACE}'.
+Run 'scripts/ci/test_helm.sh cleanup-smoke' once, then rerun the smoke test.
+EOF

Review Comment:
   Since this script is meant to be run locally by devs (per the README
instructions), the `smoke` command should either auto-clean up on success or
accept a `--cleanup` flag. Right now, a dev who runs `test_helm.sh smoke`
locally is left with a dangling kind cluster, namespace, and Helm release that
they have to know to clean up via `cleanup-smoke`. In CI this doesn't matter
(ephemeral runners), but locally it leaks resources.
   
   This could be as simple as calling `cleanup_smoke` at the end of a successful
`smoke()` run, or adding a trap.



##########
scripts/ci/test_helm.sh:
##########
@@ -0,0 +1,418 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+set -euo pipefail
+
+usage() {
+  cat <<'EOF'
+Usage: scripts/ci/test_helm.sh 
<validate|smoke|cleanup-smoke|collect-smoke-diagnostics>
+
+Commands:
+  validate                  Run Helm lint and render validation scenarios.
+  smoke                     Run the Helm runtime smoke scenario against the 
current Kubernetes context.
+  cleanup-smoke             Remove the Helm smoke release namespace and any 
failed-install leftovers.
+  collect-smoke-diagnostics Collect diagnostics for the Helm smoke namespace.
+
+Notes:
+  - validate requires helm.
+  - smoke requires helm, kubectl, curl, and python3, plus an existing cluster 
and ingress controller.
+  - cleanup-smoke requires kubectl and optionally helm.
+  - collect-smoke-diagnostics is best-effort and does not fail on missing 
resources.
+EOF
+}
+
+REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
+cd "$REPO_ROOT"
+
+CHART_DIR="${CHART_DIR:-helm/charts/iggy}"
+HELM_RENDER_DIR="${HELM_RENDER_DIR:-/tmp/helm-render}"
+HELM_SMOKE_NAMESPACE="${HELM_SMOKE_NAMESPACE:-iggy-smoke}"
+HELM_SMOKE_RELEASE="${HELM_SMOKE_RELEASE:-iggy-smoke}"
+HELM_SMOKE_REPORT_DIR="${HELM_SMOKE_REPORT_DIR:-reports/helm-smoke}"
+HELM_SMOKE_SERVER_HOST="${HELM_SMOKE_SERVER_HOST:-server.iggy.local}"
+HELM_SMOKE_UI_HOST="${HELM_SMOKE_UI_HOST:-ui.iggy.local}"
+HELM_SMOKE_TIMEOUT="${HELM_SMOKE_TIMEOUT:-5m}"
+HELM_SMOKE_INGRESS_CLASS="${HELM_SMOKE_INGRESS_CLASS:-nginx}"
+HELM_SMOKE_KIND_NAME="${HELM_SMOKE_KIND_NAME:-iggy-helm-smoke}"
+HELM_SMOKE_SERVER_CPU_ALLOCATION="${HELM_SMOKE_SERVER_CPU_ALLOCATION:-1}"
+
+require_command() {
+  if ! command -v "$1" >/dev/null 2>&1; then
+    echo "Error: required command '$1' not found" >&2
+    exit 1
+  fi
+}
+
+extract_chart_field() {
+  local field="$1"
+  local value
+
+  value="$(
+    awk -v field="$field" '
+      $1 == field ":" {
+        gsub(/"/, "", $2)
+        print $2
+        exit
+      }
+    ' "$CHART_DIR/Chart.yaml"
+  )"
+
+  if [ -z "$value" ]; then
+    echo "Error: could not extract '$field' from $CHART_DIR/Chart.yaml" >&2
+    exit 1
+  fi
+
+  printf '%s\n' "$value"
+}
+
+extract_values_tag() {
+  local section="$1"
+  local value
+
+  value="$(
+    awk -v section="$section" '
+      $1 == section ":" {
+        in_section = 1
+        next
+      }
+      in_section && /^[^[:space:]]/ {
+        in_section = 0
+      }
+      in_section && $1 == "tag:" {
+        gsub(/"/, "", $2)
+        print $2
+        exit
+      }
+    ' "$CHART_DIR/values.yaml"
+  )"
+
+  if [ -z "$value" ]; then
+    echo "Error: could not extract '$section.image.tag' from 
$CHART_DIR/values.yaml" >&2
+    exit 1
+  fi
+
+  printf '%s\n' "$value"
+}
+
+prepare_render_dir() {
+  if [ -z "$HELM_RENDER_DIR" ] || [ "$HELM_RENDER_DIR" = "/" ]; then
+    echo "Error: HELM_RENDER_DIR must not be empty or /" >&2
+    exit 1
+  fi
+
+  rm -rf "$HELM_RENDER_DIR"
+  mkdir -p "$HELM_RENDER_DIR"
+}
+
+extract_kind_names() {
+  local file="$1"
+  local kind="$2"
+
+  awk -v kind="$kind" '
+    /^kind: / {
+      current_kind = $2
+      in_metadata = 0
+      next
+    }
+    /^metadata:$/ {
+      in_metadata = 1
+      next
+    }
+    in_metadata && /^  name: / {
+      if (current_kind == kind) {
+        print $2
+      }
+      in_metadata = 0
+    }
+  ' "$file"
+}
+
+validate() {
+  require_command helm
+
+  local chart_version
+  local chart_app_version
+  local server_image_tag
+  local ui_image_tag
+
+  chart_version="$(extract_chart_field version)"
+  chart_app_version="$(extract_chart_field appVersion)"
+  server_image_tag="$(extract_values_tag server)"
+  ui_image_tag="$(extract_values_tag ui)"
+
+  prepare_render_dir
+
+  helm lint --strict "$CHART_DIR"
+
+  helm template iggy "$CHART_DIR" > "$HELM_RENDER_DIR/default.yaml"
+  test "$(grep -c '^kind: Deployment$' "$HELM_RENDER_DIR/default.yaml")" -eq 2
+  test "$(grep -c '^kind: Service$' "$HELM_RENDER_DIR/default.yaml")" -eq 2
+  test "$(grep -c '^kind: ServiceAccount$' "$HELM_RENDER_DIR/default.yaml")" 
-eq 1
+  test "$(grep -c '^kind: Secret$' "$HELM_RENDER_DIR/default.yaml")" -eq 1
+  grep -q "helm.sh/chart: iggy-${chart_version}" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "helm.sh/chart: iggy-ui-${chart_version}" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "app.kubernetes.io/version: \"${chart_app_version}\"" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "image: \"apache/iggy:${server_image_tag}\"" 
"$HELM_RENDER_DIR/default.yaml"
+  grep -q "image: \"apache/iggy-web-ui:${ui_image_tag}\"" 
"$HELM_RENDER_DIR/default.yaml"
+
+  helm template iggy "$CHART_DIR" \
+    --set server.persistence.enabled=true \
+    --set autoscaling.enabled=true \
+    --set autoscaling.targetCPUUtilizationPercentage=80 \
+    --set server.ingress.enabled=true \
+    --set ui.ingress.enabled=true \
+    --set server.serviceMonitor.enabled=true \
+    > "$HELM_RENDER_DIR/all-features.yaml"
+  grep -q '^kind: PersistentVolumeClaim$' "$HELM_RENDER_DIR/all-features.yaml"
+  grep -q '^kind: HorizontalPodAutoscaler$' 
"$HELM_RENDER_DIR/all-features.yaml"
+  test "$(grep -c '^kind: Ingress$' "$HELM_RENDER_DIR/all-features.yaml")" -eq 
2
+  test "$(extract_kind_names "$HELM_RENDER_DIR/all-features.yaml" Ingress | 
sort -u | wc -l | tr -d ' ')" -eq 2
+  extract_kind_names "$HELM_RENDER_DIR/all-features.yaml" Ingress | grep -qx 
'iggy'
+  extract_kind_names "$HELM_RENDER_DIR/all-features.yaml" Ingress | grep -qx 
'iggy-ui'
+  grep -q '^kind: ServiceMonitor$' "$HELM_RENDER_DIR/all-features.yaml"
+
+  helm template iggy "$CHART_DIR" \
+    --kube-version 1.18.0 \
+    --api-versions networking.k8s.io/v1beta1 \
+    --api-versions autoscaling/v2beta2 \
+    --set autoscaling.enabled=true \
+    --set autoscaling.targetCPUUtilizationPercentage=80 \
+    --set server.ingress.enabled=true \
+    --set ui.ingress.enabled=true \
+    > "$HELM_RENDER_DIR/legacy-k8s-1.18.yaml"
+  test "$(grep -c '^apiVersion: networking.k8s.io/v1beta1$' 
"$HELM_RENDER_DIR/legacy-k8s-1.18.yaml")" -eq 2
+  grep -q '^apiVersion: autoscaling/v2beta2$' 
"$HELM_RENDER_DIR/legacy-k8s-1.18.yaml"
+
+  helm template iggy "$CHART_DIR" --set ui.enabled=false > 
"$HELM_RENDER_DIR/server-only.yaml"
+  test "$(grep -c '^kind: Deployment$' "$HELM_RENDER_DIR/server-only.yaml")" 
-eq 1
+  test "$(grep -c '^kind: Service$' "$HELM_RENDER_DIR/server-only.yaml")" -eq 1
+
+  helm template iggy "$CHART_DIR" --set server.enabled=false > 
"$HELM_RENDER_DIR/ui-only.yaml"
+  test "$(grep -c '^kind: Deployment$' "$HELM_RENDER_DIR/ui-only.yaml")" -eq 1
+  test "$(grep -c '^kind: Service$' "$HELM_RENDER_DIR/ui-only.yaml")" -eq 1
+
+  helm template iggy "$CHART_DIR" \
+    --set server.users.root.createSecret=false \
+    --set server.users.root.existingSecret.name=supersecret \
+    > "$HELM_RENDER_DIR/existing-secret.yaml"
+  if grep -q 'root-credentials' "$HELM_RENDER_DIR/existing-secret.yaml"; then
+    echo "Error: existing-secret render should not include generated root 
credentials" >&2
+    exit 1
+  fi
+  grep -q 'name: supersecret' "$HELM_RENDER_DIR/existing-secret.yaml"
+}
+
+smoke() {
+  require_command helm
+  require_command kubectl
+  require_command curl
+  require_command python3
+
+  local ui_image_tag
+  local server_ping_status
+  local ui_healthz_status
+  local leftover_resources
+  local helm_status
+  local smoke_values_file
+
+  mkdir -p "$HELM_SMOKE_REPORT_DIR"
+
+  if ! helm status "$HELM_SMOKE_RELEASE" -n "$HELM_SMOKE_NAMESPACE" >/dev/null 
2>&1; then
+    leftover_resources="$(
+      kubectl -n "$HELM_SMOKE_NAMESPACE" get 
deployment,service,ingress,secret,serviceaccount \
+        -l "app.kubernetes.io/instance=${HELM_SMOKE_RELEASE}" \
+        -o name 2>/dev/null || true
+    )"
+    if [ -n "$leftover_resources" ]; then
+      cat >&2 <<EOF
+Found leftover resources for a failed Helm smoke install in namespace 
'${HELM_SMOKE_NAMESPACE}'.
+Run 'scripts/ci/test_helm.sh cleanup-smoke' once, then rerun the smoke test.
+EOF
+      printf '%s\n' "$leftover_resources" >&2
+      exit 1
+    fi
+  fi
+
+  ui_image_tag="$(
+    python3 - <<'PY'
+import json
+
+with open("web/package.json", "r", encoding="utf-8") as f:
+    print(json.load(f)["version"])
+PY
+  )"
+
+  echo "$ui_image_tag" > "$HELM_SMOKE_REPORT_DIR/ui-image-tag.txt"
+
+  smoke_values_file="$(mktemp 
"${TMPDIR:-/tmp}/iggy-helm-smoke-values.XXXXXX.yaml")"
+
+  cat > "$smoke_values_file" <<EOF
+server:
+  ingress:
+    enabled: true
+    className: ${HELM_SMOKE_INGRESS_CLASS}
+    hosts:
+      - host: ${HELM_SMOKE_SERVER_HOST}
+        paths:
+          - path: /
+            pathType: Prefix
+  env:
+    - name: RUST_LOG
+      value: info
+    - name: IGGY_HTTP_ADDRESS

Review Comment:
   `smoke()` reads `web/package.json` via python3 to get the UI image tag, but
`validate()` already has `extract_values_tag`, which pulls tags from
`values.yaml`. Using two different sources of truth for the same value is
fragile — if someone updates `values.yaml` but not `web/package.json` (or vice
versa), the smoke test will use a different tag than the one validate checks.
Consider reusing `extract_values_tag ui` here instead, or at minimum document
why the two intentionally differ.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to