diff --git a/tests/test_random.py b/tests/test_random.py
new file mode 100644
index 000000000..66dc36c57
--- /dev/null
+++ b/tests/test_random.py
@@ -0,0 +1,48 @@
+import pytest
+import time
+
+# ✅ A test that passes
+def test_success():
+    assert 1 + 1 == 2
+
+# ❌ A test that fails
+def test_failure():
+    assert 1 + 1 == 3
+
+# ⏭️ A test unconditionally marked skip
+@pytest.mark.skip(reason="This test is skipped on purpose")
+def test_skipped():
+    assert True
+
+# ⏭️ A conditionally skipped test (condition is True, so it is skipped)
+@pytest.mark.skipif(True, reason="Condition met → skipped")
+def test_skipif():
+    assert False
+
+# ⚠️ A test that raises a runtime error (reported as an error/failure, not a failed assert)
+def test_error():
+    raise RuntimeError("This is a runtime error")
+
+# ❌ A test expected to fail that does fail → reported as XFAIL
+@pytest.mark.xfail(reason="Known bug, expected to fail")
+def test_expected_fail():
+    assert 1 + 1 == 3
+
+# ❌ A test expected to fail that passes anyway → XPASS (strict=False, so not an error)
+@pytest.mark.xfail(reason="Unexpected pass", strict=False)
+def test_xpass():
+    assert 1 + 1 == 2
+
+# ⏳ A slow test (simulates elapsed time)
+def test_slow():
+    time.sleep(1)
+    assert True
+
+# ⚠️ A parametrized test where some cases pass and others fail
+@pytest.mark.parametrize("x,y,expected", [
+    (1, 1, 2), # pass
+    (2, 2, 5), # fail
+    (3, 3, 6), # pass
+])
+def test_parametrize(x, y, expected):
+    assert x + y == expected