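"""Convert pytest JUnit XML reports into a single technical Markdown post-mortem.

Parses every *.xml report found in an input directory, merges the results,
and renders a Markdown summary with per-file and per-test detail. The input
reports can be produced with ``pytest --junitxml=test-reports/results.xml``.
"""
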
import xml.etree.ElementTree as ET
from pathlib import Path
from datetime import datetime
import argparse
import html


def parse_junit_xml(xml_path):
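    """Parse a single pytest JUnit XML report into a plain dict."""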
    tree = ET.parse(xml_path)
    root = tree.getroot()
    # pytest >= 5.1 nests results under a <testsuites> root; the counters
    # (tests, failures, errors, ...) live on the inner <testsuite> element
    suite = root.find("testsuite") if root.tag == "testsuites" else root
    if suite is None:  # malformed file: fall back to the root element
        suite = root

    test_data = {
        "metadata": {
            "timestamp": suite.attrib.get("timestamp"),
            "hostname": suite.attrib.get("hostname"),
            "pytest_version": suite.attrib.get("pytest_version", "unknown")
        },
        "summary": {
            "total": int(suite.attrib.get("tests", 0)),
            # JUnit's "tests" counter includes skipped tests, so skips are
            # subtracted along with failures and errors to get the pass count
            "passed": int(suite.attrib.get("tests", 0))
                      - int(suite.attrib.get("failures", 0))
                      - int(suite.attrib.get("errors", 0))
                      - int(suite.attrib.get("skipped", 0)),
            "failed": int(suite.attrib.get("failures", 0)),
            "errors": int(suite.attrib.get("errors", 0)),
            "skipped": int(suite.attrib.get("skipped", 0)),
            "time": float(suite.attrib.get("time", 0))
        },
        "environment": {},
        "files": {},
        "system_out": [],
        "system_err": []
    }

    # Capture suite-level system logs (.//system-out also matches per-test
    # elements, so per-test output is repeated in the global section)
    for system_out in root.findall(".//system-out"):
        if system_out.text:  # guard: .text is None for empty elements
            test_data["system_out"].append(system_out.text.strip())
    for system_err in root.findall(".//system-err"):
        if system_err.text:
            test_data["system_err"].append(system_err.text.strip())

    # Detailed per-test analysis
    for testcase in root.findall(".//testcase"):
        # .get() avoids a KeyError on testcases without a classname attribute
        file_path = testcase.attrib.get("classname", "").replace(".", "/") + ".py"
        test_name_full = testcase.attrib["name"]

        # Split the parameter string out of parametrized test names
        if "[" in test_name_full and "]" in test_name_full:
            test_name = test_name_full.split("[")[0]
            # split once so parameter values that contain "[" stay intact
            params = test_name_full.split("[", 1)[1][:-1]
        else:
            test_name = test_name_full
            params = None

        if file_path not in test_data["files"]:
            test_data["files"][file_path] = []

        test_entry = {
            "name": test_name,
            "full_name": test_name_full,
            "status": "passed",
            "time": float(testcase.attrib.get("time", 0)),
            "params": params,
            "error": None,
            "logs": "",
            "metadata": {
                "lineno": testcase.attrib.get("lineno"),
                "file": testcase.attrib.get("file")
            }
        }

        # Failure/error/skip detection
        for failure in testcase.findall("failure"):
            test_entry["status"] = "failed"
            test_entry["error"] = {
                "type": "assertion",
                "message": html.escape(failure.attrib.get("message", "")),
                "traceback": html.escape(failure.text.strip() if failure.text else ""),
                "full": html.escape(ET.tostring(failure, encoding="unicode"))
            }

        for error in testcase.findall("error"):
            test_entry["status"] = "error"
            test_entry["error"] = {
                "type": "exception",
                "message": html.escape(error.attrib.get("message", "")),
                "traceback": html.escape(error.text.strip() if error.text else ""),
                "full": html.escape(ET.tostring(error, encoding="unicode"))
            }

        # Skipped tests carry a <skipped> child element; without this check
        # they would be listed as passed in the per-file breakdown
        if testcase.find("skipped") is not None:
            test_entry["status"] = "skipped"

        # Capture logs specific to this test
        for system_out in testcase.findall("system-out"):
            if system_out.text:
                test_entry["logs"] += f"STDOUT:\n{html.escape(system_out.text.strip())}\n"
        for system_err in testcase.findall("system-err"):
            if system_err.text:
                test_entry["logs"] += f"STDERR:\n{html.escape(system_err.text.strip())}\n"

        test_data["files"][file_path].append(test_entry)

    return test_data


def generate_markdown_report(test_data, output_path):
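    """Render the parsed test data as a Markdown report at output_path."""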
md_content = f"""# 🛠️ Test Report - Technical Post-Mortem
|
|
**Generated:** {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
|
|
**Pytest Version:** {test_data["metadata"]["pytest_version"]}
|
|
**Test Timestamp:** {test_data["metadata"]["timestamp"]}
|
|
**Hostname:** {test_data["metadata"]["hostname"]}
|
|
|
|
## 🔍 Global Overview
|
|
| Metric | Count |
|
|
|-----------------|-------|
|
|
| Total Tests | {test_data["summary"]["total"]} |
|
|
| ✅ Passed | {test_data["summary"]["passed"]} |
|
|
| ❌ Failed | {test_data["summary"]["failed"]} |
|
|
| 💥 Errors | {test_data["summary"]["errors"]} |
|
|
| ⏸️ Skipped | {test_data["summary"]["skipped"]} |
|
|
| ⏱️ Total Duration | {test_data["summary"]["time"]:.2f}s |
|
|
|
|
"""
|
|
|
|
    # Per-file details
    for file_path, tests in test_data["files"].items():
        md_content += f"\n## 📂 File: `{file_path}`\n"

        # Per-file statistics
        file_stats = {
            "passed": sum(1 for t in tests if t["status"] == "passed"),
            "failed": sum(1 for t in tests if t["status"] == "failed"),
            "errors": sum(1 for t in tests if t["status"] == "error")
        }
        md_content += f"**Results:** ✅ {file_stats['passed']} | ❌ {file_stats['failed']} | 💥 {file_stats['errors']}\n\n"

        # Group parametrized variants under their base test name
        test_groups = {}
        for test in tests:
            if test["name"] not in test_groups:
                test_groups[test["name"]] = []
            test_groups[test["name"]].append(test)

        for test_name, test_versions in test_groups.items():
            md_content += f"### 🧪 Test: `{test_name}`\n"

            for test in test_versions:
                # Pick the emoji matching the test status
                status_emoji = {
                    "passed": "✅",
                    "failed": "❌",
                    "error": "💥",
                    "skipped": "⏸️"
                }.get(test["status"], "⏺️")

                md_content += f"#### {status_emoji} Variant: `{test['params'] if test['params'] else 'default'}`\n"
                md_content += f"- **Duration:** {test['time']:.3f}s\n"
                md_content += f"- **Location:** `{test['metadata']['file']}:{test['metadata']['lineno']}`\n"

                if test["error"]:
                    md_content += "\n##### 🔥 Error Details\n"
                    md_content += f"**Type:** `{test['error']['type'].upper()}`\n"
                    md_content += f"**Message:**\n```\n{test['error']['message']}\n```\n"
                    md_content += f"**Traceback:**\n```python\n{test['error']['traceback']}\n```\n"
                    md_content += f"<details>\n<summary>📜 Raw Error Data</summary>\n\n```xml\n{test['error']['full']}\n```\n</details>\n"

                if test["logs"]:
                    md_content += f"\n##### 📜 Test Logs\n```\n{test['logs']}\n```\n"

                md_content += "\n---\n"

    # Global system logs
    if test_data["system_out"] or test_data["system_err"]:
        md_content += "\n## 🌐 System Logs\n"
        if test_data["system_out"]:
            md_content += "### STDOUT\n```\n" + "\n".join(test_data["system_out"]) + "\n```\n"
        if test_data["system_err"]:
            md_content += "### STDERR\n```\n" + "\n".join(test_data["system_err"]) + "\n```\n"

    with open(output_path, "w", encoding="utf-8") as f:
        f.write(md_content)


def process_test_reports(input_dir, output_file):
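    """Merge every *.xml report found in input_dir into one Markdown report."""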
    combined_data = {
        "metadata": {},
        "summary": {"total": 0, "passed": 0, "failed": 0, "errors": 0, "skipped": 0, "time": 0},
        "files": {},
        "system_out": [],
        "system_err": []
    }

    # sorted() keeps the merge order deterministic across runs
    for xml_file in sorted(Path(input_dir).glob("*.xml")):
        file_data = parse_junit_xml(xml_file)

        # Merge metadata (keep the first file's values)
        if not combined_data["metadata"]:
            combined_data["metadata"] = file_data["metadata"]

        # Aggregate the summary counters
        for k in combined_data["summary"]:
            combined_data["summary"][k] += file_data["summary"].get(k, 0)

        # Merge per-file test lists
        for file_path, tests in file_data["files"].items():
            if file_path not in combined_data["files"]:
                combined_data["files"][file_path] = []
            combined_data["files"][file_path].extend(tests)

        # System logs
        combined_data["system_out"].extend(file_data["system_out"])
        combined_data["system_err"].extend(file_data["system_err"])

    generate_markdown_report(combined_data, output_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert pytest XML reports to technical Markdown")
    parser.add_argument("--input", help="Input directory containing XML reports", default="./test-reports")
    parser.add_argument("--output", help="Output Markdown file", default="./TEST-REPORT-TECHNICAL.md")
    args = parser.parse_args()

    process_test_reports(args.input, args.output)
    print(f"Technical report generated at {args.output}")