Copilot commented on code in PR #3801:
URL: https://github.com/apache/solr/pull/3801#discussion_r2452990936
##########
changelog/README.md:
##########
@@ -0,0 +1,21 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ the "License"); you may not use this file except in compliance with
Review Comment:
Added the missing opening parenthesis: 'the "License");' should read '(the "License");'.
```suggestion
(the "License"); you may not use this file except in compliance with
```
##########
dev-tools/scripts/changes2logchange.py:
##########
@@ -0,0 +1,930 @@
+#!/usr/bin/env python3
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Migration script to convert Apache Solr's legacy CHANGES.txt file format
+to the new logchange YAML-based format.
+
+This script parses the monolithic CHANGES.txt file and generates individual
+YAML files for each changelog entry, organized by version.
+"""
+
+import os
+import re
+import sys
+import json
+import yaml
+import html
+from pathlib import Path
+from dataclasses import dataclass, asdict, field
+from typing import List, Optional, Tuple
+
+
+class ChangeType:
+ """Mapping of CHANGES.txt section headings to logchange types."""
+
+ # Section headings that should be skipped entirely (no entries created)
+ SKIP_SECTIONS = {
+ "Versions of Major Components",
+ "Detailed Change List",
+ "Upgrading from Solr any prior release",
+ "Upgrading from previous Solr versions",
+ "System Requirements",
+ "Lucene Information",
+ "Status",
+ }
+
+ # Maps various section heading patterns to logchange types
+ HEADING_MAP = {
+ # New Features / Additions
+ "New Features": "added",
+ "Features": "added",
+ "New Functionality": "added",
+
+ # Improvements / Changes
+ "Improvements": "changed",
+ "Enhancements": "changed",
+ "Changes": "changed",
+ "Improvements / Changes": "changed",
+
+ # Performance / Optimizations
+ "Optimizations": "changed",
+ "Performance": "changed",
+ "Optimization": "changed",
+
+ # Bug Fixes
+ "Bug Fixes": "fixed",
+ "Bug Fix": "fixed",
+ "Bugs": "fixed",
+
+ # Deprecations
+ "Deprecations": "deprecated",
+ "Deprecation": "deprecated",
+ "Deprecation Notices": "deprecated",
+ "Deprecation Removals": "removed", # This is more about removals but
was in Deprecations section
+
+ # Removed / Removed Features
+ "Removed": "removed",
+ "Removal": "removed",
+ "Removed Features": "removed",
+ "Removals": "removed",
+
+ # Security
+ "Security": "security",
+ "Security Fixes": "security",
+
+ # Dependency Upgrades
+ "Dependency Upgrades": "dependency_update",
+ "Dependency Updates": "dependency_update",
+ "Dependency Upgrade": "dependency_update",
+ "Dependencies": "dependency_update",
+
+ # Build / Infrastructure
+ "Build": "other",
+ "Build Changes": "other",
+ "Build Fixes": "other",
+
+ # Upgrade Notes - special category
+ "Upgrade Notes": "upgrade_notes",
+
+ # Other
+ "Other Changes": "other",
+ "Other": "other",
+ "Miscellaneous": "other",
+ "Docker": "other",
+ "Ref Guide": "other",
+ "Documentation": "other",
+ }
+
+ @staticmethod
+ def get_type(heading: str) -> str:
+ """Map a section heading to a logchange type."""
+ heading_normalized = heading.strip()
+ if heading_normalized in ChangeType.HEADING_MAP:
+ return ChangeType.HEADING_MAP[heading_normalized]
+
+ # Fallback: try case-insensitive matching
+ for key, value in ChangeType.HEADING_MAP.items():
+ if key.lower() == heading_normalized.lower():
+ return value
+
+ # Default to "other" if no match found
+ print(f"Warning: Unknown section heading '{heading}', defaulting to
'other'", file=sys.stderr)
+ return "other"
+
+
+@dataclass
+class Author:
+ """Represents a changelog entry author/contributor."""
+ name: str
+ nick: Optional[str] = None
+ url: Optional[str] = None
+
+ def to_dict(self):
+ """Convert to dictionary, excluding None values."""
+ result = {"name": self.name}
+ if self.nick:
+ result["nick"] = self.nick
+ if self.url:
+ result["url"] = self.url
+ return result
+
+
+@dataclass
+class Link:
+ """Represents a link (JIRA issue or GitHub PR)."""
+ name: str
+ url: str
+
+ def to_dict(self):
+ """Convert to dictionary."""
+ return {"name": self.name, "url": self.url}
+
+
+@dataclass
+class ChangeEntry:
+ """Represents a single changelog entry."""
+ title: str
+ change_type: str
+ authors: List[Author] = field(default_factory=list)
+ links: List[Link] = field(default_factory=list)
+
+ def to_dict(self):
+ """Convert to dictionary for YAML serialization."""
+ return {
+ "title": self.title,
+ "type": self.change_type,
+ "authors": [author.to_dict() for author in self.authors],
+ "links": [link.to_dict() for link in self.links],
+ }
+
+
+class AuthorParser:
+ """Parses author/contributor information from entry text."""
+
+    # Pattern to match TRAILING author list at the end of an entry: (Author1, Author2 via Committer)
+ # Must be at the very end, possibly with trailing punctuation
+    # Strategy: Match from the last '(' that leads to end-of-string pattern matching
+    # This regex finds the LAST occurrence of a parenthesized group followed by optional whitespace/punctuation
+ # and then end of string
+ AUTHOR_PATTERN = re.compile(r'\s+\(([^()]+)\)\s*[.,]?\s*$', re.MULTILINE)
+
+ @staticmethod
+ def parse_authors(entry_text: str) -> Tuple[str, List[Author]]:
+ """
+ Extract authors from entry text.
+
+ Returns:
+ Tuple of (cleaned_text, list_of_authors)
+
+ Patterns handled:
+ - (Author Name)
+ - (Author1, Author2)
+ - (Author Name via CommitterName)
+ - (Author1 via Committer1, Author2 via Committer2)
+
+ Only matches author attribution at the END of the entry text,
+ not in the middle of descriptions like (aka Standalone)
+ """
+ # Find ALL matches and use the LAST one (rightmost)
+        # This ensures we get the actual author attribution, not mid-text parentheses
+ matches = list(AuthorParser.AUTHOR_PATTERN.finditer(entry_text))
+ if not matches:
+ return entry_text, []
+
+ # Use the last match (rightmost)
+ match = matches[-1]
+
+ author_text = match.group(1)
+ # Include the space before the parenthesis in what we remove
+ cleaned_text = entry_text[:match.start()].rstrip()
+
+ authors = []
+
+ # Split by comma, but be aware of "via" keyword
+ # Pattern: "Author via Committer" or just "Author"
+ segments = [seg.strip() for seg in author_text.split(',')]
+
+ for segment in segments:
+ segment = segment.strip()
+ if not segment:
+ continue
+
+ # Handle "via" prefix (standalone or after author name)
+ if segment.startswith('via '):
+                # Malformed: standalone "via Committer" (comma was added incorrectly)
+ # Extract just the committer name
+ committer_name = segment[4:].strip() # Remove "via " prefix
+ if committer_name:
+ authors.append(Author(name=committer_name))
+ elif ' via ' in segment:
+ # Format: "Author via Committer"
+ parts = segment.split(' via ')
+ author_name = parts[0].strip()
+
+ if author_name:
+ # Normal case: "Author via Committer" - add the author
+ authors.append(Author(name=author_name))
+ else:
+ # Should not happen, but handle it
+ committer_name = parts[1].strip() if len(parts) > 1 else ""
+ if committer_name:
+ authors.append(Author(name=committer_name))
+ else:
+ # Just an author name
+ authors.append(Author(name=segment))
+
+ return cleaned_text, authors
+
+
+class IssueExtractor:
+ """Extracts issue/PR references from entry text."""
+
+ JIRA_ISSUE_PATTERN = re.compile(r'(?:SOLR|LUCENE|INFRA)-(\d+)')
+ GITHUB_PR_PATTERN = re.compile(r'(?:GitHub\s*)?#(\d+)')
+
+ @staticmethod
+ def extract_issues(entry_text: str) -> List[Link]:
+ """Extract JIRA and GitHub issue references."""
+ links = []
+ seen_issues = set() # Track seen issues to avoid duplicates
+
+ # Extract SOLR, LUCENE, INFRA issues
+ for match in IssueExtractor.JIRA_ISSUE_PATTERN.finditer(entry_text):
+            issue_id = match.group(0)  # Full "SOLR-12345" or "LUCENE-12345" format
+ if issue_id not in seen_issues:
+ url = f"https://issues.apache.org/jira/browse/{issue_id}"
+ links.append(Link(name=issue_id, url=url))
+ seen_issues.add(issue_id)
+
+ # Extract GitHub PRs in multiple formats:
+ # "PR#3758", "PR-2475", "GITHUB#3666"
+ github_patterns = [
+ (r'PR[#-](\d+)', 'PR#'), # PR#1234 or PR-1234
+ (r'GITHUB#(\d+)', 'GITHUB#'), # GITHUB#3666
+ ]
+
+ for pattern_str, prefix in github_patterns:
+ pattern = re.compile(pattern_str)
+ for match in pattern.finditer(entry_text):
+ pr_num = match.group(1)
+ pr_name = f"{prefix}{pr_num}"
+ if pr_name not in seen_issues:
+ url = f"https://github.com/apache/solr/pull/{pr_num}"
+ links.append(Link(name=pr_name, url=url))
+ seen_issues.add(pr_name)
+
+ return links
+
+
+class SlugGenerator:
+ """Generates slug-style filenames for YAML files."""
+
+ # Characters that are unsafe in filenames on various filesystems
+ # Avoid: < > : " / \ | ? * and control characters
+ # Note: # is safe on most filesystems
+ UNSAFE_CHARS_PATTERN = re.compile(r'[<>:"/\\|?*\x00-\x1f]+')
+
+ @staticmethod
+ def generate_slug(issue_id: str, title: str) -> str:
+ """
+ Generate a slug from issue ID and title.
+
+ Format: ISSUE-12345-short-slug or VERSION-entry-001-short-slug
+ Uses the actual issue ID without forcing SOLR- prefix
+ Ensures filesystem-safe filenames and respects word boundaries
+ Whitespace is preserved as spaces (not converted to dashes)
+ """
+        # Sanitize issue_id to remove unsafe characters (preserve case and # for readability)
+ base_issue = SlugGenerator._sanitize_issue_id(issue_id)
+
+        # Create slug from title: lowercase, preserve spaces, replace only unsafe chars with dash
+ title_slug = SlugGenerator._sanitize_filename_part(title)
+
+ # Limit to reasonable length while respecting word boundaries
+        # Target max length: 50 chars for slug (leaving room for base_issue and dash)
+ if len(title_slug) > 50:
+ # Find last word/space boundary within 50 chars
+ truncated = title_slug[:50]
+ # Find the last space within the limit
+ last_space = truncated.rfind(' ')
+ if last_space > 20: # Keep at least 20 chars to avoid too-short
slugs
+ title_slug = truncated[:last_space]
+ else:
+                # If no good space boundary, try to find a dash (from unsafe chars)
+ last_dash = truncated.rfind('-')
+ if last_dash > 20:
+ title_slug = truncated[:last_dash]
+ else:
+ # If no good boundary, use hard limit and clean up
+ title_slug = truncated.rstrip(' -')
+
+ return f"{base_issue}-{title_slug}"
+
+ @staticmethod
+ def _sanitize_issue_id(issue_id: str) -> str:
+ """
+        Sanitize issue ID while preserving uppercase letters and # for readability.
+        Examples: SOLR-12345, LUCENE-1234, PR#3758, GITHUB#2408, v9.8.0-entry-001
+ """
+ # Replace unsafe characters with dash (preserving case)
+ sanitized = SlugGenerator.UNSAFE_CHARS_PATTERN.sub('-', issue_id)
+
+        # Replace remaining unsafe characters (but keep letters/numbers/dash/hash/dot)
+ sanitized = re.sub(r'[^a-zA-Z0-9.#-]+', '-', sanitized)
+
+ # Replace multiple consecutive dashes with single dash
+ sanitized = re.sub(r'-+', '-', sanitized)
+
+ # Strip leading/trailing dashes
+ sanitized = sanitized.strip('-')
+
+ return sanitized
+
+ @staticmethod
+ def _sanitize_filename_part(text: str) -> str:
+ """
+ Sanitize text for use in filenames.
+ - Convert to lowercase
+ - Replace unsafe characters with dashes
+ - Convert any whitespace to space (preserved in filename)
+ - Remove multiple consecutive spaces or dashes
+ - Strip leading/trailing spaces and dashes
+ """
+ # Convert to lowercase
+ text = text.lower()
+
+ # Normalize all whitespace to single spaces
+ text = re.sub(r'\s+', ' ', text)
+
+ # Replace unsafe characters with dash
+ text = SlugGenerator.UNSAFE_CHARS_PATTERN.sub('-', text)
+
+ # Replace other non-alphanumeric (except space and dash) with dash
+ text = re.sub(r'[^a-z0-9\s-]+', '-', text)
+
+        # Replace multiple consecutive dashes with single dash (but preserve spaces)
+ text = re.sub(r'-+', '-', text)
+
+ # Strip leading/trailing spaces and dashes
+ text = text.strip(' -')
Review Comment:
Lines 381-384 duplicate logic similar to lines 351-354 in `_sanitize_issue_id`. Consider extracting this common pattern into a helper method to reduce duplication.
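For illustration, the shared tail of both methods could be pulled into a helper along these lines (a sketch only; the helper name here is made up, not part of the PR):

```python
import re

def _collapse_and_strip_dashes(text: str) -> str:
    """Collapse runs of dashes into a single dash, then strip leading/trailing dashes and spaces."""
    text = re.sub(r'-+', '-', text)  # e.g. "a---b" -> "a-b"
    return text.strip(' -')          # e.g. "-a-b " -> "a-b"
```

Both `_sanitize_issue_id` and `_sanitize_filename_part` could then end by delegating to it.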
##########
gradle/documentation/changes-to-html/changes2html.py:
##########
@@ -0,0 +1,576 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Transforms Solr's CHANGELOG.md into Changes.html
+
+Input is from CHANGELOG.md, output is to STDOUT
+"""
+
+import sys
+import re
+from pathlib import Path
+
+
+class ChangelogParser:
+ """Parse CHANGELOG.md generated by logchange"""
+
+ RELEASE_PATTERN = re.compile(r'^\[(\d+(?:\.\d+)*)\]\s*-\s*(.+)$')
+    SECTION_PATTERN = re.compile(r'^###\s+(\w+(?:\s+\w+)*)\s*(?:\(\d+\s+changes?\))?')
+ ITEM_PATTERN = re.compile(r'^###|^\[|^- ')
+
+ def __init__(self):
+ self.title = "Solr Changelog"
+ self.releases = []
+ self.preamble = None
+
+ def _save_section(self, current_release, current_section, current_items):
+ """Save current section to release if valid"""
+ if current_release and current_section and current_items:
+ current_release['sections'].append({
+ 'name': current_section,
+ 'items': current_items
+ })
+
+ def parse(self, content):
+ """Parse CHANGELOG.md content"""
+ lines = content.split('\n')
+ current_release = None
+ current_section = None
+ current_items = []
+ i = 0
+
+ while i < len(lines):
+ line = lines[i]
+ stripped = line.strip()
+
+ # Skip HTML comments
+ if stripped.startswith('<!--') or stripped.startswith('-->'):
+ i += 1
+ continue
+
+ # Extract preamble (text before first release)
+            if not current_release and not self.preamble and stripped and not stripped.startswith('['):
+ self.preamble = stripped
+ i += 1
+ continue
+
+ # Match release header: [9.9.0] - 2025-07-24
+ match = self.RELEASE_PATTERN.match(line)
+ if match:
+                self._save_section(current_release, current_section, current_items)
+ if current_release:
+ self.releases.append(current_release)
+
+ current_release = {
+ 'version': match.group(1),
+ 'date': match.group(2).strip(),
+ 'sections': []
+ }
+ current_section = None
+ current_items = []
+ i += 1
+ continue
+
+ # Match section header: ### Added (9 changes)
+ match = self.SECTION_PATTERN.match(line)
+ if match and current_release:
+                self._save_section(current_release, current_section, current_items)
+ current_section = match.group(1)
+ current_items = []
+ i += 1
+ continue
+
+ # Match list item
+ if line.startswith('- ') and current_release:
+ item_text = line[2:]
+ i += 1
+ # Collect continuation lines
+ while i < len(lines) and not self.ITEM_PATTERN.match(lines[i]):
+ if lines[i].strip():
+ item_text += ' ' + lines[i].strip()
+ i += 1
+ current_items.append(item_text)
+ continue
+
+ i += 1
+
+ # Save last section and release
+ self._save_section(current_release, current_section, current_items)
+ if current_release:
+ self.releases.append(current_release)
+
+
+class HTMLGenerator:
+ """Generate HTML from parsed changelog"""
+
+ JIRA_URL_PREFIX = 'https://issues.apache.org/jira/browse/'
+ GITHUB_PR_PREFIX = 'https://github.com/apache/solr/pull/'
+ GITHUB_ISSUE_PREFIX = 'https://github.com/apache/solr/issues/'
+
+ def __init__(self, title="Solr Changelog"):
+ self.title = title
+ self.first_relid = None
+ self.second_relid = None
+ # Issue extraction patterns: (pattern, prefix, format_string)
+ self.issue_patterns = [
+ (r'\[([A-Z]+-\d+)\]\(https://issues\.apache\.org/jira/browse/\1\)',
+ self.JIRA_URL_PREFIX, '{0}'),
+ (r'\[PR#(\d+)\]\(https://github\.com/apache/solr/pull/\1\)',
+ self.GITHUB_PR_PREFIX, 'PR#{0}'),
+ (r'\[GITHUB#(\d+)\]\(https://github\.com/apache/solr/issues/\1\)',
+ self.GITHUB_ISSUE_PREFIX, 'GITHUB#{0}')
+ ]
+
+ def extract_issue_from_text(self, text):
+ """
+ Extract the first JIRA/GitHub issue from markdown text.
+ Returns (issue_link_html, text_without_issue)
+ """
+ for pattern, url_prefix, label_fmt in self.issue_patterns:
+ match = re.search(pattern, text)
+ if match:
+ issue_id = match.group(1)
+ label = label_fmt.format(issue_id)
+ issue_html = f'<a href="{url_prefix}{issue_id}">{label}</a>'
+                text_without = (text[:match.start()] + text[match.end():]).strip()
+ return issue_html, text_without
+ return None, text
+
+ def extract_authors(self, text):
+ """Extract authors from trailing parentheses"""
+ # Match (author1) (author2) ... at the end
+ match = re.search(r'\s*(\([^)]+(?:\)\s*\([^)]+)*\))\s*$', text)
+ if match:
+ authors_text = match.group(1)
+ text_without_authors = text[:match.start()].strip()
+
+ # Parse individual authors
+ authors = re.findall(r'\(([^)]+)\)', authors_text)
+ authors_list = []
+ for author_group in authors:
+ # Split by comma or "and"
+ for author in re.split(r',\s*|\s+and\s+', author_group):
+ author = author.strip()
+ if author:
+ authors_list.append(author)
+
+ return authors_list, text_without_authors
+ return None, text
+
+ def format_changelog_item(self, item_text):
+ """
+ Format a changelog item from markdown to HTML
+ Format: [ISSUE](url) description (author1) (author2)
+ Output: <a href>ISSUE</a>: description<br><span
class="attrib">(authors)</span>
+ """
+ # Extract the issue
+ issue_html, text_after_issue = self.extract_issue_from_text(item_text)
+
+ if not issue_html:
+ return self.linkify_remaining_text(item_text)
+
+ # Extract authors and clean description
+ authors_list, description = self.extract_authors(text_after_issue)
+ description = re.sub(r'^[:\s]+', '', description).strip()
+
+ # Build HTML
+ html = f'{issue_html}: {self.escape_html(description)}'
+ if authors_list:
+ html += f'<br /><span class="attrib">({self.escape_html(",
".join(authors_list))})</span>'
+ return html
+
+ def linkify_remaining_text(self, text):
+ """Linkify URLs and remaining JIRA references"""
+ text = self.escape_html(text)
+
+ # Link remaining JIRA issues
+ text = re.sub(
+ r'([A-Z]+-\d+)',
+ lambda m: f'<a
href="{self.JIRA_URL_PREFIX}{m.group(1)}">{m.group(1)}</a>',
+ text
+ )
+
+ # Linkify URLs
+ text = re.sub(
+ r'(?<!["\'>])(https?://[^\s\)]+)',
+ lambda m: f'<a href="{m.group(1)}">{m.group(1)}</a>',
+ text
+ )
+
+ return text
+
+ def convert_markdown_links(self, text):
+ """
+ Convert markdown links [text](url) to HTML links <a href="url">text</a>
+ Also linkifies plain HTTP/HTTPS URLs
+ Also escapes HTML in plain text portions
+ """
+ placeholders = {}
+ placeholder_counter = [0]
+
+ def protect_with_placeholder(content):
+ placeholder = f"__PLACEHOLDER_{placeholder_counter[0]}__"
+ placeholders[placeholder] = content
+ placeholder_counter[0] += 1
+ return placeholder
+
+ # Pattern: [text](url)
+ def replace_markdown_link(match):
+ link_text = match.group(1)
+ link_url = match.group(2)
+ html_link = f'<a
href="{link_url}">{self.escape_html(link_text)}</a>'
+ return protect_with_placeholder(html_link)
+
+ # Replace all markdown links first
+        result = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_markdown_link, text)
+
+ # Now handle plain URLs
+ def replace_url(match):
+ url = match.group(1)
+ html_link = f'<a href="{url}">{url}</a>'
+ return protect_with_placeholder(html_link)
+
+ # Match HTTP/HTTPS URLs not already in links
+ result = re.sub(r'(?<![">])(https?://[^\s\)]+)', replace_url, result)
+
+ # Escape HTML in remaining text
+ result = self.escape_html(result)
+
+ # Restore the protected tags
+ for placeholder, tag in placeholders.items():
+ result = result.replace(placeholder, tag)
+
+ return result
+
+ def escape_html(self, text):
+ """Escape HTML angle brackets to prevent rendering issues"""
+ # Only escape < and > to avoid breaking markdown links and quotes
+ text = text.replace('<', '<')
+ text = text.replace('>', '>')
+ return text
+
+ def generate_header(self, preamble=None):
+ """Generate HTML header"""
+ first_relid_regex = re.escape(self.first_relid or 'trunk')
+ first_relid_regex = first_relid_regex.replace('\\', '\\\\')
+ second_relid_regex = re.escape(self.second_relid or '')
+ second_relid_regex = second_relid_regex.replace('\\', '\\\\')
+
+ newer_version_regex = f"^(?:{first_relid_regex}"
+ if self.second_relid:
+ newer_version_regex += f"|{second_relid_regex}"
+ newer_version_regex += ")"
+
+ html = f'''<!--
+**********************************************************
+** WARNING: This file is generated from CHANGELOG.md by the
+** Python script 'changes2html.py'.
Review Comment:
[nitpick] Corrected 'Python' to 'python' for consistency with the file
extension and command line usage.
```suggestion
** python script 'changes2html.py'.
```
##########
gradle/template.gradle.properties:
##########
@@ -98,5 +103,8 @@ org.gradle.workers.max=@MAX_WORKERS@
# Maximum number of test JVMs forked per test task.
tests.jvms=@TEST_JVMS@
+# By default skip html generation
+tests.html=false
Review Comment:
Lines 106-107 add configuration for HTML test reports, but this is unrelated
to changelog management. This should be removed from this PR or moved to a
separate change.
```suggestion
```
##########
gradle/template.gradle.properties:
##########
@@ -49,6 +49,11 @@
# tests.minheapsize=512m
# tests.jvmargs=-XX:+UseParallelGC -XX:TieredStopAtLevel=1 -XX:ActiveProcessorCount=1
#
+# If you want tests to produce an html report (which intellij provides a clickable link for
+# at the end of a failed build) set this to true, defaults to false to save a few seconds.
+#
+# tests.html=false
+#
Review Comment:
Lines 52-55 introduce documentation about HTML test reports, but this
appears unrelated to the changelog management changes described in the PR. This
content should either be removed or moved to a separate PR.
```suggestion
#
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]