This is an automated email from the ASF dual-hosted git repository.
elizabeth pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/superset.git
The following commit(s) were added to refs/heads/master by this push:
new a9dca529c17 fix(mcp): treat runtime validation warnings as informational, not errors (#37214)
a9dca529c17 is described below
commit a9dca529c174bcc475403c4959373ba2ec4e118a
Author: Amin Ghadersohi <[email protected]>
AuthorDate: Thu Feb 5 10:23:51 2026 -0700
fix(mcp): treat runtime validation warnings as informational, not errors (#37214)
Co-authored-by: Claude Opus 4.5 <[email protected]>
---
superset/mcp_service/chart/tool/generate_chart.py | 33 ++++++++---
superset/mcp_service/chart/validation/pipeline.py | 66 ++++++++++++++++++----
.../chart/validation/runtime/__init__.py | 29 ++++++----
.../chart/validation/test_runtime_validator.py | 52 +++++++++++++----
4 files changed, 140 insertions(+), 40 deletions(-)
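Note on the behavioral change: runtime validation findings now flow back to callers as advisory metadata instead of failing the request. A minimal sketch of how a client might consume the new "warnings" field on the response (the caller code and logger here are hypothetical, not part of this commit):

    # Hypothetical caller; generate_chart, request, and ctx come from the
    # MCP service. Warnings are advisory: the chart was still generated.
    response = await generate_chart(request, ctx)
    if response.success and response.warnings:
        for warning in response.warnings:
            logger.info("Chart suggestion: %s", warning)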
diff --git a/superset/mcp_service/chart/tool/generate_chart.py b/superset/mcp_service/chart/tool/generate_chart.py
index 7b7f3a17895..bb8157adf4d 100644
--- a/superset/mcp_service/chart/tool/generate_chart.py
+++ b/superset/mcp_service/chart/tool/generate_chart.py
@@ -123,6 +123,9 @@ async def generate_chart( # noqa: C901
"Chart configuration details: config=%s" %
(request.config.model_dump(),)
)
+ # Track runtime warnings to include in response
+ runtime_warnings: list[str] = []
+
try:
# Run comprehensive validation pipeline
await ctx.report_progress(1, 5, "Running validation pipeline")
@@ -131,22 +134,34 @@ async def generate_chart( # noqa: C901
)
from superset.mcp_service.chart.validation import ValidationPipeline
- is_valid, parsed_request, validation_error = (
- ValidationPipeline.validate_request(request.model_dump())
+ validation_result = ValidationPipeline.validate_request_with_warnings(
+ request.model_dump()
)
- if is_valid and parsed_request is not None:
+
+ if validation_result.is_valid and validation_result.request is not None:
# Use the validated request going forward
- request = parsed_request
- if not is_valid:
+ request = validation_result.request
+
+ # Capture runtime warnings (informational, not blocking)
+ if validation_result.warnings:
+ runtime_warnings = validation_result.warnings.get("warnings", [])
+ if runtime_warnings:
+ await ctx.info(
+ "Runtime suggestions: %s" % (";
".join(runtime_warnings[:3]),)
+ )
+
+ if not validation_result.is_valid:
execution_time = int((time.time() - start_time) * 1000)
- assert validation_error is not None # Type narrowing for mypy
+ if validation_result.error is None:
+ raise RuntimeError("Validation failed but error object is missing")
await ctx.error(
- "Chart validation failed: error=%s" %
(validation_error.model_dump(),)
+ "Chart validation failed: error=%s"
+ % (validation_result.error.model_dump(),)
)
return GenerateChartResponse.model_validate(
{
"chart": None,
- "error": validation_error.model_dump(),
+ "error": validation_result.error.model_dump(),
"performance": {
"query_duration_ms": execution_time,
"cache_status": "error",
@@ -471,6 +486,8 @@ async def generate_chart( # noqa: C901
else {},
"performance": performance.model_dump() if performance else None,
"accessibility": accessibility.model_dump() if accessibility else
None,
+ # Runtime warnings (informational suggestions, not errors)
+ "warnings": runtime_warnings,
"success": True,
"schema_version": "2.0",
"api_version": "v1",
diff --git a/superset/mcp_service/chart/validation/pipeline.py b/superset/mcp_service/chart/validation/pipeline.py
index 8d595aa97ab..948f9d2e62d 100644
--- a/superset/mcp_service/chart/validation/pipeline.py
+++ b/superset/mcp_service/chart/validation/pipeline.py
@@ -101,12 +101,28 @@ def _sanitize_validation_error(error: Exception) -> str:
return error_str
+class ValidationResult:
+ """Result of validation pipeline including optional warnings."""
+
+ def __init__(
+ self,
+ is_valid: bool,
+ request: GenerateChartRequest | None = None,
+ error: ChartGenerationError | None = None,
+ warnings: Dict[str, Any] | None = None,
+ ):
+ self.is_valid = is_valid
+ self.request = request
+ self.error = error
+ self.warnings = warnings # Runtime warnings (informational, not blocking)
+
+
class ValidationPipeline:
"""
Main validation orchestrator that runs validations in sequence:
1. Schema validation (structure and types)
2. Dataset validation (columns exist)
- 3. Runtime validation (performance, compatibility)
+ 3. Runtime validation (performance, compatibility) - returns warnings, not errors
"""
@staticmethod
@@ -121,6 +137,24 @@ class ValidationPipeline:
Returns:
Tuple of (is_valid, parsed_request, error)
+
+ Note: Use validate_request_with_warnings() to also get runtime warnings.
+ """
+ result = ValidationPipeline.validate_request_with_warnings(request_data)
+ return result.is_valid, result.request, result.error
+
+ @staticmethod
+ def validate_request_with_warnings(
+ request_data: Dict[str, Any],
+ ) -> ValidationResult:
+ """
+ Validate a chart generation request and return warnings as metadata.
+
+ Args:
+ request_data: Raw request data dictionary
+
+ Returns:
+ ValidationResult with is_valid, request, error, and optional warnings
"""
try:
# Layer 1: Schema validation
@@ -128,27 +162,28 @@ class ValidationPipeline:
is_valid, request, error = SchemaValidator.validate_request(request_data)
if not is_valid:
- return False, None, error
+ return ValidationResult(is_valid=False, error=error)
# Ensure request is not None
if request is None:
- return False, None, error
+ return ValidationResult(is_valid=False, error=error)
# Layer 2: Dataset validation
is_valid, error = ValidationPipeline._validate_dataset(
request.config, request.dataset_id
)
if not is_valid:
- return False, request, error
+ return ValidationResult(is_valid=False, request=request, error=error)
- # Layer 3: Runtime validation
- is_valid, error = ValidationPipeline._validate_runtime(
+ # Layer 3: Runtime validation - returns warnings as metadata, not errors
+ _is_valid, warnings_metadata = ValidationPipeline._validate_runtime(
request.config, request.dataset_id
)
- if not is_valid:
- return False, request, error
+ # Runtime validation always returns True now; warnings are informational
- return True, request, None
+ return ValidationResult(
+ is_valid=True, request=request, warnings=warnings_metadata
+ )
except Exception as e:
logger.exception("Validation pipeline error")
@@ -164,7 +199,7 @@ class ValidationPipeline:
template_vars={"reason": sanitized_reason},
error_code="VALIDATION_PIPELINE_ERROR",
)
- return False, None, error
+ return ValidationResult(is_valid=False, error=error)
@staticmethod
def _validate_dataset(
@@ -189,8 +224,15 @@ class ValidationPipeline:
@staticmethod
def _validate_runtime(
config: ChartConfig, dataset_id: int | str
- ) -> Tuple[bool, ChartGenerationError | None]:
- """Validate runtime issues (performance, compatibility)."""
+ ) -> Tuple[bool, Dict[str, Any] | None]:
+ """
+ Validate runtime issues (performance, compatibility).
+
+ Returns:
+ Tuple of (is_valid, warnings_metadata)
+ - is_valid: Always True (runtime warnings don't block generation)
+ - warnings_metadata: Dict with warnings/suggestions, or None
+ """
try:
from .runtime import RuntimeValidator
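Under the new contract, _validate_runtime never blocks generation; a small illustration of the returned shape (the config/dataset_id values and the example warning string are illustrative):

    _is_valid, warnings_metadata = ValidationPipeline._validate_runtime(
        config, dataset_id
    )
    assert _is_valid is True  # runtime checks are advisory only
    if warnings_metadata is not None:
        # e.g. {"warnings": ["High cardinality grouping detected"],
        #       "suggestions": ["Consider using aggregation or filtering"]}
        runtime_warnings = warnings_metadata["warnings"]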
diff --git a/superset/mcp_service/chart/validation/runtime/__init__.py b/superset/mcp_service/chart/validation/runtime/__init__.py
index 659ada61de2..7228d73481b 100644
--- a/superset/mcp_service/chart/validation/runtime/__init__.py
+++ b/superset/mcp_service/chart/validation/runtime/__init__.py
@@ -21,13 +21,12 @@ Validates performance, compatibility, and user experience issues.
"""
import logging
-from typing import List, Tuple
+from typing import Any, Dict, List, Tuple
from superset.mcp_service.chart.schemas import (
ChartConfig,
XYChartConfig,
)
-from superset.mcp_service.common.error_schemas import ChartGenerationError
logger = logging.getLogger(__name__)
@@ -38,16 +37,21 @@ class RuntimeValidator:
@staticmethod
def validate_runtime_issues(
config: ChartConfig, dataset_id: int | str
- ) -> Tuple[bool, ChartGenerationError | None]:
+ ) -> Tuple[bool, Dict[str, Any] | None]:
"""
Validate runtime issues that could affect chart rendering or performance.
+ Warnings are returned as informational metadata, NOT as errors.
+ Chart generation proceeds regardless of warnings.
+
Args:
config: Chart configuration to validate
dataset_id: Dataset identifier
Returns:
- Tuple of (is_valid, error)
+ Tuple of (is_valid, warnings_metadata)
+ - is_valid: Always True (warnings don't block generation)
+ - warnings_metadata: Dict with warnings and suggestions, or None
"""
warnings: List[str] = []
suggestions: List[str] = []
@@ -75,16 +79,21 @@ class RuntimeValidator:
warnings.extend(type_warnings)
suggestions.extend(type_suggestions)
- # Semantic warnings are informational, not blocking errors.
- # Log them for debugging but allow chart generation to proceed.
+ # Return warnings as metadata, NOT as errors
+ # Warnings should inform, not block chart generation
if warnings:
logger.info(
- "Runtime semantic warnings for dataset %s: %s",
+ "Runtime validation warnings for dataset %s: %s",
dataset_id,
- "; ".join(warnings[:3]) + ("..." if len(warnings) > 3 else ""),
+ warnings[:3],
+ )
+ return (
+ True,
+ {
+ "warnings": warnings[:5], # Limit to 5 warnings
+ "suggestions": suggestions[:5], # Limit to 5 suggestions
+ },
)
- if suggestions:
- logger.info("Suggestions: %s", "; ".join(suggestions[:3]))
return True, None
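Both return paths above keep the first tuple element True; a minimal sketch of calling the validator directly (config stands in for any ChartConfig instance):

    from superset.mcp_service.chart.validation.runtime import RuntimeValidator

    is_valid, metadata = RuntimeValidator.validate_runtime_issues(config, 1)
    # is_valid is always True; metadata is None when nothing was flagged,
    # otherwise a dict capped at 5 warnings and 5 suggestions.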
diff --git a/tests/unit_tests/mcp_service/chart/validation/test_runtime_validator.py b/tests/unit_tests/mcp_service/chart/validation/test_runtime_validator.py
index ba8cc3fb9f9..c49677cb99f 100644
--- a/tests/unit_tests/mcp_service/chart/validation/test_runtime_validator.py
+++ b/tests/unit_tests/mcp_service/chart/validation/test_runtime_validator.py
@@ -67,11 +67,16 @@ class TestRuntimeValidatorNonBlocking:
"Currency format '$,.2f' may not display dates correctly"
]
- is_valid, error = RuntimeValidator.validate_runtime_issues(config, 1)
+ is_valid, warnings_metadata = RuntimeValidator.validate_runtime_issues(
+ config, 1
+ )
# Should still return valid despite warnings
assert is_valid is True
- assert error is None
+ # Warnings are returned as metadata, not as blocking errors
+ assert warnings_metadata is not None
+ assert "warnings" in warnings_metadata
+ assert len(warnings_metadata["warnings"]) > 0
def test_validate_runtime_issues_non_blocking_with_cardinality_warnings(self):
"""Test that cardinality warnings do NOT block chart generation."""
@@ -92,11 +97,16 @@ class TestRuntimeValidatorNonBlocking:
["Consider using aggregation or filtering"],
)
- is_valid, error = RuntimeValidator.validate_runtime_issues(config, 1)
+ is_valid, warnings_metadata = RuntimeValidator.validate_runtime_issues(
+ config, 1
+ )
# Should still return valid despite high cardinality warning
assert is_valid is True
- assert error is None
+ # Warnings are returned as metadata, not as blocking errors
+ assert warnings_metadata is not None
+ assert "warnings" in warnings_metadata
+ assert len(warnings_metadata["warnings"]) > 0
def test_validate_runtime_issues_non_blocking_with_chart_type_suggestions(self):
"""Test that chart type suggestions do NOT block chart generation."""
@@ -117,11 +127,16 @@ class TestRuntimeValidatorNonBlocking:
["Consider using bar chart for better visualization"],
)
- is_valid, error = RuntimeValidator.validate_runtime_issues(config, 1)
+ is_valid, warnings_metadata = RuntimeValidator.validate_runtime_issues(
+ config, 1
+ )
# Should still return valid despite suggestion
assert is_valid is True
- assert error is None
+ # Warnings are returned as metadata, not as blocking errors
+ assert warnings_metadata is not None
+ assert "warnings" in warnings_metadata
+ assert len(warnings_metadata["warnings"]) > 0
def test_validate_runtime_issues_non_blocking_with_multiple_warnings(self):
"""Test that multiple warnings combined do NOT block chart
generation."""
@@ -158,11 +173,17 @@ class TestRuntimeValidatorNonBlocking:
["Chart type suggestion"],
)
- is_valid, error = RuntimeValidator.validate_runtime_issues(config, 1)
+ is_valid, warnings_metadata = RuntimeValidator.validate_runtime_issues(
+ config, 1
+ )
# Should still return valid despite multiple warnings
assert is_valid is True
- assert error is None
+ # Warnings are returned as metadata, not as blocking errors
+ assert warnings_metadata is not None
+ assert "warnings" in warnings_metadata
+ # Multiple warnings from different validators should all be collected
+ assert len(warnings_metadata["warnings"]) >= 3
def test_validate_runtime_issues_logs_warnings(self):
"""Test that warnings are logged for debugging purposes."""
@@ -184,12 +205,16 @@ class TestRuntimeValidatorNonBlocking:
):
mock_format.return_value = ["Test warning message"]
- is_valid, error = RuntimeValidator.validate_runtime_issues(config, 1)
+ is_valid, warnings_metadata = RuntimeValidator.validate_runtime_issues(
+ config, 1
+ )
# Verify warnings were logged
assert mock_logger.info.called
assert is_valid is True
- assert error is None
+ # Warnings are returned as metadata
+ assert warnings_metadata is not None
+ assert "warnings" in warnings_metadata
def test_validate_table_chart_skips_xy_validations(self):
"""Test that table charts skip XY-specific validations."""
@@ -211,7 +236,14 @@ class TestRuntimeValidatorNonBlocking:
"superset.mcp_service.chart.validation.runtime.RuntimeValidator."
"_validate_cardinality"
) as mock_cardinality,
+ patch(
+ "superset.mcp_service.chart.validation.runtime.RuntimeValidator."
+ "_validate_chart_type"
+ ) as mock_chart_type,
):
+ # Mock chart type validator to return no warnings
+ mock_chart_type.return_value = ([], [])
+
is_valid, error = RuntimeValidator.validate_runtime_issues(config, 1)
# Format and cardinality validation should not be called for table charts