from collections import defaultdict
|
|
from datetime import datetime
|
|
import json
|
|
import argparse
|
|
import os
|
|
import re
|
|
import pytest
|
|
from pytest import ExitCode
|
|
import traceback
|
|
import pytz
|
|
from textwrap import indent
|
|
|
|
|
|
|
|
def stringify(obj, indent=0):
    """Render *obj* as an indented, YAML-like plain-text tree.

    None and empty strings render as "None"; empty containers render as
    "[]" / "{}"; nested lists and dicts recurse with one extra space of
    indentation per level. Everything else falls back to str().

    Note: the parameter name shadows textwrap.indent imported at module
    level, but this function never uses that import.
    """
    space = ' ' * indent

    if obj is None or obj == "":
        return "None"

    if isinstance(obj, list):
        if not obj:
            return "[]"
        # Falsy elements (None, "", 0, empty containers) render as the
        # "(vide)" placeholder; everything else recurses. The original code
        # had two identical branches for dict vs non-dict elements — merged
        # here since both produced the same string.
        return '\n'.join(
            f"{space}- (vide)" if not e
            else f"{space}- {stringify(e, indent + 1)}"
            for e in obj
        )

    if isinstance(obj, dict):
        if not obj:
            return "{}"
        # Scalar values stay on the key's line; nested containers go on the
        # following line(s), indented one level deeper.
        return '\n'.join(
            f"{space}{k}: {stringify(v, indent + 1)}" if not isinstance(v, (dict, list))
            else f"{space}{k}:\n{stringify(v, indent + 1)}"
            for k, v in obj.items()
        )

    return str(obj)
|
|
|
|
def sanitize_param_value(v):
    """Neutralise characters in *v* that would break Markdown/HTML output.

    '&' -> '^', '<' -> '(', '>' -> ')', '=' -> '≡', '\\' -> '/';
    quotes and backticks are dropped entirely. Returns str(v) transformed.
    """
    # One C-level pass via str.translate instead of eight chained
    # .replace() calls; no mapping output re-enters the source set, so
    # the result is identical regardless of substitution order.
    table = str.maketrans({
        '&': '^',
        '<': '(',
        '>': ')',
        '"': None,
        "'": None,
        '`': None,
        '=': '≡',
        '\\': '/',
    })
    return str(v).translate(table)
|
|
|
|
def normalize_nodeid(nodeid):
    """Convert a pytest nodeid into the Allure ``fullName`` format.

    ``tests/pkg/test_mod.py::test_fn[param]`` becomes
    ``tests.pkg.test_mod#test_fn`` (parametrization suffix stripped).
    Returns None when the nodeid does not match the expected shape.
    """
    m = re.match(r"(tests[/\\].+?)\.py::(.+?)(?:\[.*)?$", nodeid)
    if m is None:
        return None
    path_part, func_part = m.groups()
    # Path separators (either flavour) become dots to form a dotted module path.
    dotted = path_part.replace("/", ".").replace("\\", ".")
    return f"{dotted}#{func_part}"
|
|
|
|
def get_details_block_other_sections(summary, body, level=0):
    """Wrap *body* in a collapsible HTML <details> element for the report.

    *level* controls visual nesting: each level adds 18px of left margin,
    and any level above zero also gets a light left border.
    """
    margin = 18 * level
    border = "border-left: 2px solid #eee;" if level > 0 else ""
    # Build the fragment piecewise, then join — same bytes as the original
    # single concatenated f-string.
    pieces = [
        f'<div style="margin-left: {margin}px; {border} padding-left: 8px;">\n',
        f"<details>\n<summary>{summary}</summary>\n\n",
        f"{body}\n",
        "</details>\n",
        "</div>\n\n",
    ]
    return "".join(pieces)
|
|
|
|
def make_test_block(test, status, emoji, level, runtime_params):
    """Build one collapsible <details> Markdown block for a single test.

    Parameters:
        test: per-test dict from the pytest JSON report; must carry
            'global_number' (set by json_to_md_nested) and usually 'nodeid'.
        status, level: accepted for interface compatibility with callers but
            not used by the current implementation.
        emoji: status emoji shown in the summary line.
        runtime_params: mapping of nodeid -> callspec dict, or None.

    Returns the Markdown string for the block.
    """
    nodeid = test.get("nodeid", "")
    # BUGFIX: runtime_params may be None (json_to_md_nested's default);
    # calling .get on it raised AttributeError. Treat None as an empty map.
    callspec = (runtime_params or {}).get(nodeid, {})
    params = callspec.get("params", {}) if isinstance(callspec, dict) else {}

    # Test summary line (number + optional sanitized parameter display)
    summary = f"{emoji} Test {test['global_number']}"
    if params:
        param_display = ", ".join(f"{k}={sanitize_param_value(v)}" for k, v in params.items())
        summary += f"<br><span style='color: #888; font-size: 0.9em;'>(params: {param_display})</span>"

    # Runtime parameters come before the phase sections
    body = ""
    if callspec:
        block = f"```python\n{stringify(callspec)}\n```"
        body += f"**_*📌 Runtime Parameters*_**\n\n{block}\n\n"

    # Walk the test's phase dicts (setup/call/teardown), skipping metadata keys
    skip_keys = {"nodeid", "global_number"}
    for phase in [k for k in test if isinstance(test[k], dict) and k not in skip_keys]:
        body += f"**_*📌 {phase.capitalize()} phase*_**\n\n"
        for field, value in test[phase].items():
            block = stringify(value)
            body += f"**{field}:**\n\n```python\n{block}\n```\n\n"

    # Final formatting with consistent indentation inside the <details>
    body_content = "\n".join(f" {line}" for line in body.strip().splitlines())

    return (
        f" - <details>\n"
        f" <summary>{summary}</summary>\n\n"
        f"{body_content}\n"
        f" </details>\n\n"
    )
|
|
|
|
|
|
def json_to_md_nested(json_path, md_path, runtime_params=None):
    """Convert a pytest JSON report into a nested-<details> Markdown report.

    Sections written, in order: header + timestamp, General Info, Summary,
    Tests (grouped status -> file -> function), Collected files (grouped by
    top-level folder), then any remaining report keys (e.g. warnings).

    Parameters:
        json_path: path to the pytest JSON report to read.
        md_path: path of the Markdown file to write (overwritten).
        runtime_params: optional mapping nodeid -> callspec, forwarded to
            make_test_block. NOTE(review): make_test_block calls .get on it,
            so passing the default None will crash if any test is rendered.
    """
    with open(json_path) as f:
        data = json.load(f)

    with open(md_path, 'w', encoding='utf-8') as f:
        f.write(f"# 🧪 Test Report\n")

        # Timestamp in Swiss local time.
        local = pytz.timezone("Europe/Zurich")
        now = datetime.now(local)
        f.write(f"*Generated on {now.strftime('%Y-%m-%d %H:%M:%S %Z')}*\n\n")

        # Everything before the "summary" key (minus created/exitcode) is
        # treated as general metadata — relies on dict insertion order.
        general_info = {}
        for k, v in data.items():
            if k == "summary":
                break
            if k not in {"created", "exitcode"}:
                general_info[k] = v

        if general_info:
            f.write("## 🧾 General Info\n")
            for key, value in general_info.items():
                f.write(f"- **{key}**: {stringify(value)}\n")
            f.write("\n")

        if 'summary' in data:
            f.write("## 📋 Summary\n")
            for key, value in data['summary'].items():
                f.write(f"- **{key.capitalize()}**: {stringify(value)}\n")
            f.write("\n")

        # --------- Tests section ----------
        if "tests" in data and "summary" in data:
            # Assign a stable 1-based display number to every test (mutates
            # the test dicts in place; make_test_block reads it back).
            test_counter = 1
            for test in data["tests"]:
                test['global_number'] = test_counter
                test_counter += 1

            f.write("## 🔎 Tests\n\n")

            # Status ordering follows the summary keys up to (excluding)
            # "total", so the report mirrors pytest's own summary order.
            summary_items = list(data["summary"].items())
            total_index = next((i for i, (k, _) in enumerate(summary_items) if k == "total"), len(summary_items))
            status_order = [k for k, _ in summary_items[:total_index]]

            tests_by_status = defaultdict(list)
            for test in data["tests"]:
                outcome = test.get("outcome", "unknown")
                tests_by_status[outcome].append(test)

            for status in status_order:
                if status not in tests_by_status:
                    continue

                count = len(tests_by_status[status])
                emoji = "✅" if status == "passed" else "❌"
                status_label = status.capitalize().replace('_', ' ')

                f.write(f"<details>\n")
                f.write(f"<summary>{emoji} {status_label} ({count})</summary>\n\n")

                # Two-level grouping: filename -> function name -> tests.
                # NOTE(review): parts[1] assumes every nodeid contains "::".
                grouped = defaultdict(lambda: defaultdict(list))
                for test in tests_by_status[status]:
                    nodeid = test.get("nodeid", "")
                    parts = nodeid.split("::")
                    filename = parts[0].replace("tests\\", "").replace("tests/", "")
                    funcname = parts[1].split("[")[0]
                    grouped[filename][funcname].append(test)

                for filename, funcs in grouped.items():
                    f.write(f" - <details>\n")
                    # NOTE(review): the filename placeholder is hard-coded as
                    # "(unknown)" — the grouped filename is never displayed.
                    f.write(f' <summary>📄 <span style="color: #00695C; font-weight: bold">(unknown)</span></summary>\n\n')

                    for funcname, tests in funcs.items():
                        # Function header line with arrow and teal styling
                        f.write(f' ↳ <span style="color: #009688; font-weight: bold">Function: </span> {funcname}\n')

                        # Indented test blocks, in global-number order
                        for test in sorted(tests, key=lambda x: x['global_number']):
                            block = make_test_block(test, status, emoji, level=6, runtime_params=runtime_params)
                            for line in block.strip().splitlines():
                                f.write(f" {line}\n")
                    f.write(f" </details>\n\n")
                f.write(f"</details>\n\n")

        # ---------- Collectors section -----------

        if "collectors" in data:
            f.write("## 📚 Collected files\n\n")
            # Group collectors by their top-level folder.
            grouped = defaultdict(list)
            for collector in data["collectors"]:
                nodeid = collector.get("nodeid", "unknown")
                path = nodeid.split("::")[0]
                main_folder = path.split("/")[0] if "/" in path else path
                grouped[main_folder].append(collector)

            for folder, collectors in grouped.items():
                has_fail = any(c.get("outcome") != "passed" for c in collectors)
                folder_emoji = "✅" if not has_fail else "❌"

                body_collectors = ""
                outputs = []

                # Collected error summaries (failed collectors only)
                for collector in collectors:
                    outcome = collector.get("outcome", "unknown")
                    nodeid = collector.get("nodeid", "unknown")
                    short_node = nodeid.split("[")[0]
                    results = collector.get("result", [])
                    if outcome != "passed" and results:
                        # NOTE(review): the inner "for k, v in item.items()"
                        # is evaluated before the isinstance filter, so a
                        # non-dict item in results raises AttributeError, and
                        # the str(item) branch is unreachable in practice.
                        outputs.append(f"### ❌ {short_node}\n```\n" + "\n".join(
                            f"{k}: {v}" if isinstance(item, dict) else str(item)
                            for item in results
                            for k, v in item.items() if isinstance(item, dict)
                        ) + "\n```")

                if outputs:
                    body_collectors += "### 🧾 Error or Result Summary\n\n"
                    for out in outputs:
                        body_collectors += out + "\n"

                # Write out each collected file, sorted by nodeid
                collectors_sorted = sorted(collectors, key=lambda c: c.get("nodeid", "").split("[")[0])
                # NOTE(review): debug print left in — dumps nodeids to stdout.
                print("\n".join(c["nodeid"] for c in collectors_sorted))
                folder_body = ""
                for collector in collectors_sorted:
                    outcome = collector.get("outcome", "unknown")
                    emoji = "✅" if outcome == "passed" else "❌"
                    nodeid = collector.get("nodeid", "unknown")
                    short_node = nodeid.split("[")[0]

                    body_coll = f"- **Outcome:** `{outcome}`\n"
                    other_keys = {k: v for k, v in collector.items() if k not in {"nodeid", "outcome"}}
                    if other_keys:
                        for k, v in other_keys.items():
                            block = stringify(v)
                            body_coll += f"- **{k}:**\n\n```python\n{block}\n```\n\n"
                    else:
                        body_coll += "- **Details:** `None`\n"

                    # Always wrap in <details>, even when there is no content
                    folder_body += f" - <details>\n"
                    folder_body += f" <summary>{emoji} {short_node}</summary>\n\n"
                    for line in body_coll.strip().splitlines():
                        folder_body += f" {line}\n"
                    folder_body += f" </details>\n\n"

                # Wrap the whole folder in a parent <details> block —
                # always collapsible, even at the first level.
                f.write(f"<details>\n")
                f.write(f"<summary>{folder_emoji} {folder} ({len(collectors)} tests)</summary>\n\n")
                for line in (folder_body + body_collectors).strip().splitlines():
                    f.write(f" {line}\n")
                f.write(f"</details>\n\n")

        # ---------- OTHER sections -----------
        EXCLUDED_KEYS = {'tests', 'collectors'}

        # Any report keys after "summary" (warnings, errors, ...) get their
        # own "⚠️" section, unless excluded or empty.
        keys = list(data.keys())
        if "summary" in keys:
            start_index = keys.index("summary") + 1
        else:
            start_index = len(keys)

        for key in keys[start_index:]:
            if key in EXCLUDED_KEYS or not data[key]:
                continue

            f.write(f"## ⚠️ {key.capitalize()}\n\n")

            for i, entry in enumerate(data[key], 1):
                entry_body = "```python\n"
                if isinstance(entry, dict):
                    for k, v in entry.items():
                        entry_body += f"{k}: {v}\n"
                else:
                    entry_body += str(entry) + "\n"
                entry_body += "```\n"
                f.write(get_details_block_other_sections(f"{key.capitalize()} #{i}", entry_body, level=1))
|
|
|
|
|
|
def run_pytest_and_generate_banner_with_logs(md_path, log_path, exit_code):
    """Prepend a pytest status banner plus log excerpts to the Markdown report.

    Reads the raw pytest log at *log_path*, extracts the "short test summary
    info" section, and rewrites *md_path* with a banner (chosen from
    *exit_code*), the summary excerpt, and the full raw log, all before the
    existing report content. Errors are reported on stdout; the function
    returns without raising.
    """
    # Exit codes 0 and 1 both mean pytest ran to completion (1 = failures);
    # the remaining known codes each get a dedicated banner.
    known_banners = {
        2: (
            "⚠️ **Test execution interrupted**\n\n"
            "> The test run was interrupted by the user (reasons : KeyboardInterrupt or ...).\n\n"
        ),
        3: (
            "🛑 **Internal error during testing**\n\n"
            "> An internal error occurred while executing the tests.\n\n"
        ),
        4: (
            "❗ **Pytest command line usage error**\n\n"
            "> There was an error in how pytest was invoked.\n\n"
        ),
        5: (
            "❗ **No tests were collected**\n\n"
            "> Pytest did not find any tests to run.\n\n"
        ),
    }
    if exit_code in (0, 1):
        banner = (
            "✅ **Pytest completed successfully**\n\n"
            "> All tests were collected and executed properly. See the details below.\n\n"
        )
    else:
        banner = known_banners.get(
            exit_code,
            f"❓ **Unknown pytest exit code: {exit_code}**\n\n"
            "> Unexpected result during test execution.\n\n",
        )

    try:
        with open(log_path, "r") as lf:
            log_lines = lf.readlines()
    except Exception as e:
        print(f"❌ Could not read log file: {e}")
        return

    # Capture from the "short test summary info" header through the final
    # "===== ... in X.XXs =====" terminator line (inclusive).
    short_summary_lines = []
    in_summary = False
    for line in log_lines:
        if "short test summary info" in line.lower():
            in_summary = True
        if in_summary:
            short_summary_lines.append(line)
        if re.match(r"=+.* in .*s =+", line):
            break

    try:
        with open(md_path, "r") as f:
            original_md = f.read()
    except Exception as e:
        print(f"❌ Could not read markdown report: {e}")
        return

    summary_details = (
        "<details>\n<summary>📋 Short test summary info</summary>\n\n"
        + "```python\n" + "".join(short_summary_lines) + "```\n</details>\n\n"
    )
    log_details = (
        "<details>\n<summary>🪵 Full raw pytest log</summary>\n\n"
        + "```python\n" + "".join(log_lines) + "```\n</details>\n\n"
    )
    full_banner = banner + summary_details + log_details + "---\n\n"

    try:
        with open(md_path, "w") as f:
            f.write(full_banner + original_md)
        print("✅ Banner and log summary added to markdown report.")
    except Exception as e:
        print(f"❌ Failed to update markdown report: {e}")
        return
|
|
|
|
def main():
    """CLI entry point: parse arguments, render the Markdown report from the
    pytest JSON file, then prepend the status banner and raw log excerpts."""
    parser = argparse.ArgumentParser(description="Convert JSON test results to Markdown.")
    parser.add_argument("--input", required=True, help="Path to pytest JSON file")
    parser.add_argument("--output", required=True, help="Path to output Markdown file")
    parser.add_argument("--log", required=False, help="Path to raw pytest output log (optional)")
    parser.add_argument("--params", required=False, help="Path to runtime-params.json")
    parser.add_argument("--exit-code", type=int, default=0, help="Exit code from pytest to determine the banner.")
    args = parser.parse_args()

    # Build the nodeid -> callspec map from the optional runtime-params file;
    # a read/parse failure is reported but does not abort report generation.
    runtime_params = {}
    if args.params and os.path.exists(args.params):
        try:
            with open(args.params, "r") as fh:
                param_entries = json.load(fh)
                for item in param_entries:
                    if "nodeid" in item and isinstance(item.get("callspec"), dict):
                        runtime_params[item["nodeid"]] = item["callspec"]
        except Exception as e:
            print(f"❌ Failed to read runtime parameters: {e}")

    json_to_md_nested(args.input, args.output, runtime_params)

    run_pytest_and_generate_banner_with_logs(md_path=args.output, log_path=args.log, exit_code=args.exit_code)

    print(f"✅ Report generated at {args.output}")
|
|
|
|
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()