# Convert a pytest JSON report into a nested, collapsible Markdown report.
import argparse
import json
import os
import re
import traceback
from collections import defaultdict
from datetime import datetime

import pytest
from pytest import ExitCode


def stringify(obj):
    """Render *obj* as plain text for the Markdown report.

    ``None`` and the empty string become the literal ``"None"``; lists and
    dicts are flattened recursively, one entry per line; anything else is
    rendered with ``str()``.
    """
    if obj is None or obj == "":
        return "None"
    if isinstance(obj, dict):
        entries = [f"{key}: {stringify(val)}" for key, val in obj.items()]
        return '\n'.join(entries)
    if isinstance(obj, list):
        return '\n'.join(map(stringify, obj))
    return str(obj)


def normalize_nodeid(nodeid):
    """Convert a pytest nodeid to Allure fullName format.

    ``tests/foo/test_x.py::test_y[param]`` -> ``tests.foo.test_x#test_y``.
    Returns ``None`` when the nodeid does not point at a test under tests/.
    """
    m = re.match(r"(tests[/\\].+?)\.py::(.+?)(?:\[.*)?$", nodeid)
    if m is None:
        return None
    module_path, func_name = m.groups()
    dotted = module_path.replace("/", ".").replace("\\", ".")
    return f"{dotted}#{func_name}"


def extract_param_str_from_nodeid(nodeid):
    """Return the parameter string of a parametrized nodeid, or ``None``.

    e.g. ``test[x-y-z]`` -> ``x-y-z``.  The outermost bracket pair is used,
    so nested/inner brackets survive intact.
    """
    start = nodeid.find('[')
    end = nodeid.rfind(']')
    if start == -1 or end <= start:
        return None
    return nodeid[start + 1:end]


def get_details_block(summary, body, level=0, params_str=None):
    """Build a collapsible HTML ``<details>`` section for the Markdown report.

    Nesting depth *level* controls the left margin (18px per level) and, for
    nested blocks, a light left border.  When *params_str* is given, the
    parameters are appended to the summary text.
    """
    indent_px = level * 18
    if level > 0:
        border = "border-left: 2px solid #eee;"
    else:
        border = ""
    label = summary
    if params_str:
        label = (f"{label} <span style='color: #888; font-size: 0.9em;'>"
                 f"parameters: [{params_str}]</span>")
    pieces = [
        f'<div style="margin-left: {indent_px}px; {border} padding-left: 8px;">\n',
        f"<details>\n<summary>{label}</summary>\n\n",
        f"{body}\n",
        "</details>\n",
        "</div>\n\n",
    ]
    return "".join(pieces)


def make_test_block(test, status, emoji, level):
    """Render one test entry, with its per-phase details, as a nested
    ``<details>`` block.

    *status* is accepted for signature parity with the caller; the emoji and
    label are passed in directly.  Every dict-valued key of *test* other than
    "nodeid" is treated as a pytest phase (setup / call / teardown ...).
    """
    node = test.get("nodeid", "")
    params = extract_param_str_from_nodeid(node)

    sections = []
    if params:
        sections.append(f"- **Parameters:** `{params}`\n")

    for key, payload in test.items():
        if key == "nodeid" or not isinstance(payload, dict):
            continue
        rendered = ""
        for field, value in payload.items():
            if value is None:
                rendered += f"- **{field.capitalize()}:** None\n"
            else:
                content = "```\n" + stringify(value) + "\n```"
                rendered += get_details_block(f"📌 {field.capitalize()}", content, level + 2)
        if rendered:
            sections.append(f"\n### 🔧 {key.capitalize()} Phase\n\n" + rendered)

    return get_details_block(f"{emoji} #{test['global_number']}",
                             "".join(sections), level + 1, params_str=params)


def _write_general_info(f, data):
    """Write the General Info section: every top-level key appearing before
    "summary" in the report, except bookkeeping keys."""
    general_info = {}
    for key, value in data.items():
        if key == "summary":
            # Keys are insertion-ordered; "summary" and everything after it
            # is handled by the dedicated sections below.
            break
        if key not in {"created", "exitcode"}:
            general_info[key] = value

    if general_info:
        f.write("## 🧾 General Info\n")
        for key, value in general_info.items():
            f.write(f"- **{key}**: {stringify(value)}\n")
        f.write("\n")


def _write_summary(f, data):
    """Write the Summary section from the report's "summary" mapping."""
    if 'summary' in data:
        f.write("## 📋 Summary\n")
        for key, value in data['summary'].items():
            f.write(f"- **{key.capitalize()}**: {stringify(value)}\n")
        f.write("\n")


def _write_tests_section(f, data):
    """Write the Tests section: tests grouped by outcome, then by source
    file, then by (unparametrized) function name."""
    if "tests" not in data or "summary" not in data:
        return

    # Number the tests globally in report order so nested blocks stay sortable.
    for number, test in enumerate(data["tests"], 1):
        test['global_number'] = number

    f.write("## 🔎 Tests\n")

    # The summary keys before "total" define the status display order.
    summary_items = list(data["summary"].items())
    total_index = next((i for i, (k, _) in enumerate(summary_items) if k == "total"),
                       len(summary_items))
    status_order = [k for k, _ in summary_items[:total_index]]

    tests_by_status = defaultdict(list)
    for test in data["tests"]:
        tests_by_status[test.get("outcome", "unknown")].append(test)

    for status in status_order:
        if status not in tests_by_status:
            continue

        count = len(tests_by_status[status])
        emoji = "✅" if status == "passed" else "❌"
        status_label = status.capitalize().replace('_', ' ')

        grouped = defaultdict(lambda: defaultdict(list))
        for test in tests_by_status[status]:
            nodeid = test.get("nodeid", "")
            parts = nodeid.split("::")
            filename = parts[0].replace("tests\\", "").replace("tests/", "")
            # Guard against malformed nodeids without a "::" separator
            # (previously an IndexError).
            funcname = parts[1].split("[")[0] if len(parts) > 1 else "(unknown)"
            grouped[filename][funcname].append(test)

        body_status = ""
        for filename, funcs in grouped.items():
            body_file = ""
            for funcname, tests in funcs.items():
                body_func = ""
                for test in sorted(tests, key=lambda t: t['global_number']):
                    body_func += make_test_block(test, status, emoji, level=3)
                body_file += get_details_block(f"🔧 Function: `{funcname}`", body_func, level=2)
            # BUG FIX: the file label was hardcoded to "(unknown)" although
            # the tests are grouped by filename.
            body_status += get_details_block(f"📁 {filename}", body_file, level=1)
        f.write(get_details_block(f"{emoji} {status_label} ({count})", body_status, level=0))


def _write_collectors_section(f, data):
    """Write the Collected files section, grouped by top-level folder."""
    if "collectors" not in data:
        return

    f.write("## 📚 Collected files\n")
    grouped = defaultdict(list)
    for collector in data["collectors"]:
        nodeid = collector.get("nodeid", "unknown")
        path = nodeid.split("::")[0]
        main_folder = path.split("/")[0] if "/" in path else path
        grouped[main_folder].append(collector)

    for folder, collectors in grouped.items():
        has_fail = any(c.get("outcome") != "passed" for c in collectors)
        folder_emoji = "✅" if not has_fail else "❌"

        # Collect the raw output of non-passing collectors.
        outputs = []
        for collector in collectors:
            outcome = collector.get("outcome", "unknown")
            short_node = collector.get("nodeid", "unknown").split("[")[0]
            results = collector.get("result", [])
            if outcome != "passed" and results:
                # BUG FIX: the original comprehension called item.items()
                # unconditionally and crashed with AttributeError on non-dict
                # result entries; its str(item) branch was unreachable.
                lines = []
                for item in results:
                    if isinstance(item, dict):
                        lines.extend(f"{k}: {v}" for k, v in item.items())
                    else:
                        lines.append(str(item))
                outputs.append(f"### ❌ {short_node}\n```\n" + "\n".join(lines) + "\n```")

        body_collectors = ""
        if outputs:
            body_collectors += "### 🧾 Error or Result Summary\n\n"
            for out in outputs:
                body_collectors += out + "\n"

        folder_body = ""
        for collector in sorted(collectors, key=lambda c: c.get("nodeid", "").split("[")[0]):
            outcome = collector.get("outcome", "unknown")
            emoji = "✅" if outcome == "passed" else "❌"
            short_node = collector.get("nodeid", "unknown").split("[")[0]

            body_coll = f"- **Outcome:** `{outcome}`\n"
            other_keys = {k: v for k, v in collector.items() if k not in {"nodeid", "outcome"}}
            if other_keys:
                body_coll += "- **Details:**\n"
                body_coll += "```\n"
                for k, v in other_keys.items():
                    body_coll += f"{k}:\n"
                    body_coll += " None\n" if v is None else stringify(v) + "\n"
                    body_coll += "\n"
                body_coll += "```\n"
            else:
                body_coll += "- **Details:** `None`\n"
            folder_body += get_details_block(f"{emoji} {short_node}", body_coll, level=2)

        f.write(get_details_block(f"{folder_emoji} {folder} ({len(collectors)} tests)",
                                  folder_body + body_collectors, level=1))


def _write_warnings_section(f, data):
    """Write the Warnings section, one collapsible block per warning."""
    if data.get('warnings'):
        f.write("## ⚠️ Warnings\n\n")
        for i, warning in enumerate(data['warnings'], 1):
            warn_body = "```\n"
            for k, v in warning.items():
                warn_body += f"{k}: {v}\n"
            warn_body += "```\n"
            f.write(get_details_block(f"Warning #{i}", warn_body, level=1))


def json_to_md_nested(json_path, md_path):
    """Convert the pytest JSON report at *json_path* into a nested,
    collapsible Markdown report written to *md_path*.

    Sections (each skipped when its data is absent): header, general info,
    summary, tests (grouped by outcome/file/function), collected files,
    and warnings.
    """
    with open(json_path, encoding='utf-8') as f:
        data = json.load(f)

    with open(md_path, 'w', encoding='utf-8') as f:
        f.write("# 🧪 Test Report\n")
        f.write(f"*Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*\n\n")
        _write_general_info(f, data)
        _write_summary(f, data)
        _write_tests_section(f, data)
        _write_collectors_section(f, data)
        _write_warnings_section(f, data)


def run_pytest_and_generate_banner_with_logs(md_path, log_path, exit_code):
    """Prepend a status banner plus collapsible log excerpts to the Markdown
    report when pytest exited abnormally (exit code >= 2).

    Exit codes 0 (all passed) and 1 (some failures) leave the report as-is.
    Read/write failures are reported on stdout and abort silently.
    """
    if exit_code in (0, 1):
        return

    banners = {
        2: ("⚠️ **Test execution interrupted**\n\n"
            "> The test run was interrupted by the user (reasons : KeyboardInterrupt or ...).\n\n"),
        3: ("🛑 **Internal error during testing**\n\n"
            "> An internal error occurred while executing the tests.\n\n"),
        4: ("❗ **Pytest command line usage error**\n\n"
            "> There was an error in how pytest was invoked.\n\n"),
        5: ("❗ **No tests were collected**\n\n"
            "> Pytest did not find any tests to run.\n\n"),
    }
    banner = banners.get(
        exit_code,
        f"❓ **Unknown pytest exit code: {exit_code}**\n\n"
        "> Unexpected result during test execution.\n\n",
    )

    try:
        with open(log_path, "r") as log_file:
            log_lines = log_file.readlines()
    except Exception as e:
        print(f"❌ Could not read log file: {e}")
        return

    # Capture everything from the "short test summary info" header up to and
    # including the "=== ... in ...s ===" footer line.
    short_summary_lines = []
    capturing = False
    for line in log_lines:
        if "short test summary info" in line.lower():
            capturing = True
        if capturing:
            short_summary_lines.append(line)
            if re.match(r"=+.* in .*s =+", line):
                break

    try:
        with open(md_path, "r") as f:
            original_md = f.read()
    except Exception as e:
        print(f"❌ Could not read markdown report: {e}")
        return

    full_banner = (
        banner
        + "<details>\n<summary>📋 Short test summary info</summary>\n\n"
        + "```\n" + "".join(short_summary_lines) + "```\n</details>\n\n"
        + "<details>\n<summary>🪵 Full raw pytest log</summary>\n\n"
        + "```\n" + "".join(log_lines) + "```\n</details>\n\n"
        + "---\n\n"
    )

    try:
        with open(md_path, "w") as f:
            f.write(full_banner + original_md)
        print("✅ Banner and log summary added to markdown report.")
    except Exception as e:
        print(f"❌ Failed to update markdown report: {e}")
        return


def main():
    """CLI entry point: parse arguments, build the Markdown report, then
    prepend the exit-code banner and log excerpts when relevant."""
    parser = argparse.ArgumentParser(description="Convert JSON test results to Markdown.")
    arg_specs = [
        ("--input", dict(required=True, help="Path to pytest JSON file")),
        ("--output", dict(required=True, help="Path to output Markdown file")),
        ("--log", dict(required=False, help="Path to raw pytest output log (optional)")),
        ("--json", dict(required=False, help="Path to pytest-report.json")),
        ("--exit-code", dict(type=int, default=0,
                             help="Exit code from pytest to determine the banner.")),
    ]
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)

    args = parser.parse_args()

    json_to_md_nested(args.input, args.output)
    run_pytest_and_generate_banner_with_logs(md_path=args.output,
                                             log_path=args.log,
                                             exit_code=args.exit_code)

    print(f"✅ Report generated at {args.output}")


if __name__ == "__main__":
    main()