mirror of https://gitlab.freedesktop.org/mesa/mesa.git
ci/lava: Uprev freezegun
The previous version exhibited bugs when running fixtures in parallel tests. With the new version, we need to adjust the side effects on the mocks a little and pin a fixed start time to make the tests reproducible.

Signed-off-by: Guilherme Gallo <guilherme.gallo@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32163>
This commit is contained in:
parent 93ce733425
commit f4301626cd
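For context, the reproducible start time mentioned in the commit message comes from pinning the freeze_time() fixture to a fixed date instead of the wall clock. A minimal sketch of the idea, with illustrative fixture and test names rather than the exact ones used in the Mesa test suite:

import pytest
from datetime import datetime
from freezegun import freeze_time


@pytest.fixture
def frozen_time():
    # A fixed start date means every worker in a parallel run sees the same
    # initial timestamp, instead of whatever wall-clock time it started at.
    with freeze_time("2024-01-01") as frozen:
        yield frozen


def test_start_time_is_reproducible(frozen_time):
    assert datetime.now() == datetime(2024, 1, 1)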
@@ -25,7 +25,7 @@ def mock_sleep():

 @pytest.fixture
 def frozen_time(mock_sleep):
-    with freeze_time() as frozen_time:
+    with freeze_time("2024-01-01") as frozen_time:
         yield frozen_time


@@ -34,6 +34,10 @@ RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]

 @pytest.fixture
 def mock_proxy(frozen_time):
+    def mock_job_state(jid) -> dict[str, str]:
+        frozen_time.tick(1)
+        return {"job_state": "Running"}
+
     def create_proxy_mock(
         job_results=RESULT_GET_TESTJOB_RESULTS,
         testsuite_results=[generate_testsuite_result()],
@@ -53,8 +57,7 @@ def mock_proxy(frozen_time):
         proxy_logs_mock.return_value = jobs_logs_response()

         proxy_job_state = proxy_mock.scheduler.job_state
-        proxy_job_state.return_value = {"job_state": "Running"}
-        proxy_job_state.side_effect = frozen_time.tick(1)
+        proxy_job_state.side_effect = mock_job_state

         proxy_show_mock = proxy_mock.scheduler.jobs.show
         proxy_show_mock.return_value = defaultdict(
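The mock_proxy change above replaces a side effect that ran tick() once, at mock-setup time, with a callable that the mock invokes on every scheduler.job_state() poll. Under freeze_time, datetime.now() only advances when the test ticks it, which is what makes elapsed-time and timeout logic deterministic. A self-contained sketch of that pattern, with illustrative names:

from datetime import datetime, timedelta
from unittest.mock import MagicMock

from freezegun import freeze_time

with freeze_time("2024-01-01") as frozen:
    proxy = MagicMock()
    start = datetime.now()

    def job_state(job_id):
        frozen.tick(1)  # advance the frozen clock by one second per poll
        return {"job_state": "Running"}

    # A callable side_effect runs on every call; assigning the *result* of
    # frozen.tick(1) would advance the clock only once, while the mock is built.
    proxy.scheduler.job_state.side_effect = job_state

    for _ in range(5):  # five simulated polls
        proxy.scheduler.job_state("123")

    # No real time passed, yet the frozen clock moved exactly five seconds.
    assert datetime.now() - start == timedelta(seconds=5)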
@@ -1,5 +1,5 @@
 from contextlib import nullcontext as does_not_raise
-from datetime import datetime
+from datetime import UTC, datetime
 from io import StringIO
 from itertools import cycle
 from typing import Any, Callable, Generator, Iterable, Optional, Tuple, Union
@@ -46,7 +46,7 @@ def generate_testsuite_result(
 def jobs_logs_response(
     finished=False, msg=None, lvl="target", result=None, exit_code=None
 ) -> Tuple[bool, str]:
-    timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
+    timed_msg = {"dt": str(datetime.now(tz=UTC)), "msg": "New message", "lvl": lvl}
     if result:
         timed_msg["lvl"] = "target"
         timed_msg["msg"] = f"hwci: mesa: {result}, exit_code: {exit_code}"
@@ -102,7 +102,7 @@ def generate_n_logs(
     else:
         tick_gen = cycle((tick_fn,))

-    with freeze_time(datetime.now()) as time_travel:
+    with freeze_time(datetime.now(tz=UTC)) as time_travel:
         tick_sec: int = next(tick_gen)
         while True:
             # Simulate a scenario where the target job is waiting for being started
@@ -128,7 +128,7 @@ def to_iterable(tick_fn):
 def mock_logs(messages=None, result=None, exit_code=None):
     if messages is None:
         messages = {}
-    with freeze_time(datetime.now()) as time_travel:
+    with freeze_time(datetime.now(tz=UTC)) as time_travel:
         # Simulate a complete run given by message_fn
         for msg, tick_list in section_aware_message_generator(messages, result, exit_code):
             for tick_sec in tick_list:
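The datetime.now() to datetime.now(tz=UTC) changes in this file (and in the files below) make the generated log timestamps timezone-aware. Note that datetime.UTC is only available on Python 3.11+; on older interpreters the equivalent is datetime.timezone.utc. freezegun freezes both the naive and the aware form, as a quick check shows:

from datetime import UTC, datetime
from freezegun import freeze_time

with freeze_time("2024-01-01"):
    naive = datetime.now()        # frozen, but tzinfo is None
    aware = datetime.now(tz=UTC)  # frozen and explicitly UTC
    assert naive.tzinfo is None
    assert aware.tzinfo is not None
    # Mixing naive and aware datetimes in arithmetic raises TypeError,
    # which is why the tests switch to one convention consistently.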
@@ -8,7 +8,7 @@
 import os
 import xmlrpc.client
 from contextlib import nullcontext as does_not_raise
-from datetime import datetime
+from datetime import UTC, datetime
 from itertools import cycle, islice, repeat
 from pathlib import Path
 from typing import Generator
@@ -43,11 +43,14 @@ NUMBER_OF_MAX_ATTEMPTS = NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1
 @pytest.fixture
 def mock_proxy_waiting_time(mock_proxy):
     def update_mock_proxy(frozen_time, **kwargs):
+        def mock_job_state(jid) -> dict[str, str]:
+            frozen_time.tick(wait_time)
+            return {"job_state": "Running"}
+
         wait_time = kwargs.pop("wait_time", 1)
         proxy_mock = mock_proxy(**kwargs)
         proxy_job_state = proxy_mock.scheduler.job_state
-        proxy_job_state.return_value = {"job_state": "Running"}
-        proxy_job_state.side_effect = frozen_time.tick(wait_time)
+        proxy_job_state.side_effect = mock_job_state

         return proxy_mock

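One detail worth noting in update_mock_proxy above: the nested mock_job_state closes over wait_time, which is only assigned a few lines later. That is fine in Python because closure variables are looked up when the function is called, not when it is defined, and the mock is never called before update_mock_proxy returns. A tiny illustration with hypothetical names:

def make_poller(frozen_time, **kwargs):
    def tick_and_report(job_id):
        # `wait_time` is resolved at call time, so the assignment below
        # (which happens after this def) is what every call will see.
        frozen_time.tick(wait_time)
        return {"job_state": "Running"}

    wait_time = kwargs.pop("wait_time", 1)
    return tick_and_report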
@@ -261,7 +264,7 @@ def test_simulate_a_long_wait_to_start_a_job(
     side_effect,
     mock_proxy_waiting_time,
 ):
-    start_time = datetime.now()
+    start_time = datetime.now(tz=UTC)
     job: LAVAJob = retriable_follow_job(
         mock_proxy_waiting_time(
             frozen_time, side_effect=side_effect, wait_time=wait_time
@@ -271,7 +274,7 @@ def test_simulate_a_long_wait_to_start_a_job(
         None
     )

-    end_time = datetime.now()
+    end_time = datetime.now(tz=UTC)
     delta_time = end_time - start_time

     assert job.status == "pass"
@@ -292,6 +295,7 @@ LONG_LAVA_QUEUE_SCENARIOS = {
 )
 def test_wait_for_job_get_started_no_time_to_run(monkeypatch, job_timeout, expectation):
     monkeypatch.setattr("lava.lava_job_submitter.CI_JOB_TIMEOUT_SEC", job_timeout)
+    monkeypatch.setattr("lava.lava_job_submitter.CI_JOB_STARTED_AT", datetime.now(tz=UTC))
     job = MagicMock()
     # Make it escape the loop
     job.is_started.side_effect = (False, False, True)
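The added monkeypatch pins lava_job_submitter's CI_JOB_STARTED_AT, so any "how much CI time is left?" calculation starts from a known instant rather than from whenever the module was imported. The sketch below is only an illustration of that kind of budget check, not the actual lava_job_submitter code:

from datetime import UTC, datetime

# Hypothetical module-level values, mirroring the names patched in the test.
CI_JOB_TIMEOUT_SEC = 3600
CI_JOB_STARTED_AT = datetime.now(tz=UTC)


def remaining_ci_budget_sec() -> float:
    # Deterministic in tests once CI_JOB_STARTED_AT and "now" are both frozen.
    elapsed = (datetime.now(tz=UTC) - CI_JOB_STARTED_AT).total_seconds()
    return CI_JOB_TIMEOUT_SEC - elapsed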
@@ -439,14 +443,14 @@ def test_full_yaml_log(mock_proxy, frozen_time, lava_job_submitter):
     proxy.scheduler.jobs.submit = reset_logs
     try:
         time_travel_to_test_time()
-        start_time = datetime.now()
+        start_time = datetime.now(tz=UTC)
         retriable_follow_job(proxy, "", "", None)
     finally:
         try:
             # If the job fails, maybe there will be no structured log
             print(lava_job_submitter.structured_log_file.read_text())
         finally:
-            end_time = datetime.now()
+            end_time = datetime.now(tz=UTC)
             print("---- Reproduction log stats ----")
             print(f"Start time: {start_time}")
             print(f"End time: {end_time}")
@@ -5,7 +5,7 @@
 #
 # SPDX-License-Identifier: MIT

-from datetime import datetime, timedelta
+from datetime import UTC, datetime, timedelta

 import pytest
 from lava.exceptions import MesaCIKnownIssueException, MesaCITimeoutError
@@ -76,35 +76,35 @@ def test_gitlab_section(method, collapsed, expectation):
 def test_gl_sections():
     lines = [
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "debug",
             "msg": "Received signal: <STARTRUN> 0_setup-ssh-server 10145749_1.3.2.3.1",
         },
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "debug",
             "msg": "Received signal: <STARTRUN> 0_mesa 5971831_1.3.2.3.1",
         },
         # Redundant log message which triggers the same Gitlab Section, it
         # should be ignored, unless the id is different
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "target",
             "msg": "[ 7.778836] <LAVA_SIGNAL_STARTRUN 0_mesa 5971831_1.3.2.3.1>",
         },
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "debug",
             "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
         },
         # Another redundant log message
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "target",
             "msg": "[ 16.997829] <LAVA_SIGNAL_STARTTC mesa-ci_iris-kbl-traces>",
         },
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "target",
             "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
         },
@@ -140,12 +140,12 @@ def test_gl_sections():
 def test_log_follower_flush():
     lines = [
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "debug",
             "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
         },
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "target",
             "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
         },
@@ -292,7 +292,7 @@ WATCHDOG_SCENARIOS = {
 def test_log_follower_watchdog(frozen_time, timedelta_kwargs, exception):
     lines = [
         {
-            "dt": datetime.now(),
+            "dt": datetime.now(tz=UTC),
             "lvl": "debug",
             "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
         },
@@ -401,7 +401,7 @@ def test_detect_a6xx_gpu_recovery_failure(frozen_time):
     log_follower = LogFollower()
     lava_log_hints = LAVALogHints(log_follower=log_follower)
     failure_message = {
-        "dt": datetime.now().isoformat(),
+        "dt": datetime.now(tz=UTC).isoformat(),
         "msg": A6XX_GPU_RECOVERY_FAILURE_MESSAGE[0],
         "lvl": "feedback",
     }
@@ -410,13 +410,13 @@ def test_detect_a6xx_gpu_recovery_failure(frozen_time):
         lava_log_hints.detect_a6xx_gpu_recovery_failure(failure_message)
         # Simulate the passage of time within the watch period
         frozen_time.tick(1)
-        failure_message["dt"] = datetime.now().isoformat()
+        failure_message["dt"] = datetime.now(tz=UTC).isoformat()

 def test_detect_a6xx_gpu_recovery_success(frozen_time):
     log_follower = LogFollower()
     lava_log_hints = LAVALogHints(log_follower=log_follower)
     failure_message = {
-        "dt": datetime.now().isoformat(),
+        "dt": datetime.now(tz=UTC).isoformat(),
         "msg": A6XX_GPU_RECOVERY_FAILURE_MESSAGE[0],
         "lvl": "feedback",
     }
@@ -424,12 +424,12 @@ def test_detect_a6xx_gpu_recovery_success(frozen_time):
     for _ in range(A6XX_GPU_RECOVERY_FAILURE_MAX_COUNT - 1):
         lava_log_hints.detect_a6xx_gpu_recovery_failure(failure_message)
         frozen_time.tick(1)
-        failure_message["dt"] = datetime.now().isoformat()
+        failure_message["dt"] = datetime.now(tz=UTC).isoformat()

     # Simulate the passage of time outside of the watch period
     frozen_time.tick(60 * A6XX_GPU_RECOVERY_WATCH_PERIOD_MIN + 1)
     failure_message = {
-        "dt": datetime.now().isoformat(),
+        "dt": datetime.now(tz=UTC).isoformat(),
         "msg": A6XX_GPU_RECOVERY_FAILURE_MESSAGE[1],
         "lvl": "feedback",
     }
@@ -1,7 +1,7 @@
 -r ../requirements.txt
 filelock==3.12.4
 fire==0.5.0
-freezegun==1.1.0
+freezegun==1.5.1
 hypothesis==6.67.1
 mock==5.1.0
 polars==0.19.3
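A quick smoke check that a local environment picked up the new pin and that tick() accepts the plain-seconds form used throughout these tests (assuming the updated requirements file has been installed):

from datetime import datetime

import freezegun
from freezegun import freeze_time

assert freezegun.__version__ == "1.5.1"

with freeze_time("2024-01-01") as frozen:
    before = datetime.now()
    frozen.tick(1)  # plain seconds, the call style used in the tests above
    assert (datetime.now() - before).total_seconds() == 1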