#!/usr/bin/env python3
# For the dependencies, see the requirements.txt
import logging
import re
import traceback
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import accumulate
from os import getenv
from pathlib import Path
from subprocess import check_output
from typing import Any, Iterable, Optional, Pattern, TypedDict, Union

import yaml
from filecache import DAY, filecache
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
from graphql import DocumentNode


class DagNode(TypedDict):
    needs: set[str]
    stage: str
    # `name` is redundant but is here for retro-compatibility
    name: str


# see create_job_needs_dag function for more details
Dag = dict[str, DagNode]


StageSeq = OrderedDict[str, set[str]]
TOKEN_DIR = Path(getenv("XDG_CONFIG_HOME") or Path.home() / ".config")


def get_token_from_default_dir() -> str:
    token_file = TOKEN_DIR / "gitlab-token"
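    # e.g. ~/.config/gitlab-token when XDG_CONFIG_HOME is unset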
    try:
        # strict=True makes resolve() raise FileNotFoundError when the token file is
        # missing, so the fallback message below can actually trigger
        return str(token_file.resolve(strict=True))
    except FileNotFoundError as ex:
        print(
            f"Could not find {token_file}, please provide a token file as an argument"
        )
        raise ex


def get_project_root_dir():
    root_path = Path(__file__).parent.parent.parent.resolve()
    gitlab_file = root_path / ".gitlab-ci.yml"
    assert gitlab_file.exists()

    return root_path


@dataclass
class GitlabGQL:
    _transport: Any = field(init=False)
    client: Client = field(init=False)
    url: str = "https://gitlab.freedesktop.org/api/graphql"
    token: Optional[str] = None

    def __post_init__(self) -> None:
        self._setup_gitlab_gql_client()

    def _setup_gitlab_gql_client(self) -> None:
        # Select your transport with a defined url endpoint
        headers = {}
        if self.token:
            headers["Authorization"] = f"Bearer {self.token}"
        self._transport = RequestsHTTPTransport(url=self.url, headers=headers)

        # Create a GraphQL client using the defined transport
        self.client = Client(transport=self._transport, fetch_schema_from_transport=True)

    def query(
        self,
        gql_file: Union[Path, str],
        params: dict[str, Any] = {},
        operation_name: Optional[str] = None,
        paginated_key_loc: Iterable[str] = [],
        disable_cache: bool = False,
    ) -> dict[str, Any]:
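        # How this is typically invoked (a sketch mirroring create_job_needs_dag();
        # the projectPath and iid values are placeholders):
        #   gl = GitlabGQL(token=token)
        #   gl.query(
        #       "pipeline_details.gql",
        #       params={"projectPath": "mesa/mesa", "iid": "12345"},
        #       paginated_key_loc=["project", "pipeline", "jobs"],
        #   )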
        def run_uncached() -> dict[str, Any]:
            if paginated_key_loc:
                return self._sweep_pages(gql_file, params, operation_name, paginated_key_loc)
            return self._query(gql_file, params, operation_name)

        if disable_cache:
            return run_uncached()

        try:
            # Create an auxiliary variable to deliver a cached result and enable catching exceptions
            # Decorate the query to be cached
            if paginated_key_loc:
                result = self._sweep_pages_cached(
                    gql_file, params, operation_name, paginated_key_loc
                )
            else:
                result = self._query_cached(gql_file, params, operation_name)
            return result  # type: ignore
        except Exception as ex:
            logging.error(f"Cached query failed with {ex}")
            # print exception traceback
            traceback_str = "".join(traceback.format_exception(ex))
            logging.error(traceback_str)
            self.invalidate_query_cache()
            logging.error("Cache invalidated, retrying without cache")
            # retry without cache only on failure; a `return` inside `finally` would
            # discard the cached result above on every call
            return run_uncached()

    def _query(
        self,
        gql_file: Union[Path, str],
        params: dict[str, Any] = {},
        operation_name: Optional[str] = None,
    ) -> dict[str, Any]:
        # Provide a GraphQL query
        source_path: Path = Path(__file__).parent
        pipeline_query_file: Path = source_path / gql_file

        query: DocumentNode
        with open(pipeline_query_file, "r") as f:
            pipeline_query = f.read()
        query = gql(pipeline_query)

        # Execute the query on the transport
        return self.client.execute_sync(
            query, variable_values=params, operation_name=operation_name
        )

    @filecache(DAY)
    def _sweep_pages_cached(self, *args, **kwargs):
        return self._sweep_pages(*args, **kwargs)

    @filecache(DAY)
    def _query_cached(self, *args, **kwargs):
        return self._query(*args, **kwargs)

    def _sweep_pages(
        self, query, params, operation_name=None, paginated_key_loc: Iterable[str] = []
    ) -> dict[str, Any]:
        """
        Retrieve paginated data from a GraphQL API and concatenate the results into a single
        response.

        Args:
            query: represents a filepath with the GraphQL query to be executed.
            params: a dictionary that contains the parameters to be passed to the query. These
                parameters can be used to filter or modify the results of the query.
            operation_name: The `operation_name` parameter is an optional parameter that specifies
                the name of the GraphQL operation to be executed. It is used when making a GraphQL
                query to specify which operation to execute if there are multiple operations defined
                in the GraphQL schema. If not provided, the default operation will be executed.
            paginated_key_loc (Iterable[str]): The `paginated_key_loc` parameter is an iterable of
                strings that represents the location of the paginated field within the response. It
                is used to extract the paginated field from the response and append it to the final
                result. The node has to be a list of objects with a `pageInfo` field that contains
                at least the `hasNextPage` and `endCursor` fields.

        Returns:
            a dictionary containing the response from the query with the paginated field
            concatenated.
        """
        def fetch_page(cursor: str | None = None) -> dict[str, Any]:
            if cursor:
                params["cursor"] = cursor
                logging.info(
                    f"Found more than 100 elements, paginating. "
                    f"Current cursor at {cursor}"
                )

            return self._query(query, params, operation_name)

        # Execute the initial query
        response: dict[str, Any] = fetch_page()

        # Initialize an empty list to store the final result
        final_partial_field: list[dict[str, Any]] = []

        # Loop until all pages have been retrieved
        while True:
            # Get the partial field to be appended to the final result
            partial_field = response
            for key in paginated_key_loc:
                partial_field = partial_field[key]

            # Append the partial field to the final result
            final_partial_field += partial_field["nodes"]

            # Check if there are more pages to retrieve
            page_info = partial_field["pageInfo"]
            if not page_info["hasNextPage"]:
                break

            # Execute the query with the updated cursor parameter
            response = fetch_page(page_info["endCursor"])

        # Replace the "nodes" field in the original response with the final result
        partial_field["nodes"] = final_partial_field
        return response

    def invalidate_query_cache(self) -> None:
        logging.warning("Invalidating query cache")
        try:
            # the filecache decorators live on the *_cached wrappers
            self._sweep_pages_cached._db.clear()
            self._query_cached._db.clear()
        except AttributeError as ex:
            logging.warning(f"Could not invalidate cache, maybe it was not used in {ex.args}?")


def insert_early_stage_jobs(stage_sequence: StageSeq, jobs_metadata: Dag) -> Dag:
    pre_processed_dag: dict[str, set[str]] = {}
    jobs_from_early_stages = list(accumulate(stage_sequence.values(), set.union))
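    # Cumulative unions per stage (illustrative values):
    #   stage_sequence = {"build": {"b1", "b2"}, "test": {"t1"}}
    #   jobs_from_early_stages == [{"b1", "b2"}, {"b1", "b2", "t1"}]
    # so index i holds every job name up to and including stage i.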
    for job_name, metadata in jobs_metadata.items():
        final_needs: set[str] = deepcopy(metadata["needs"])
        # Pre-process jobs that are not based on needs field
        # e.g. sanity job in mesa MR pipelines
        if not final_needs:
            job_stage: str = jobs_metadata[job_name]["stage"]
            stage_index: int = list(stage_sequence.keys()).index(job_stage)
            if stage_index > 0:
                final_needs |= jobs_from_early_stages[stage_index - 1]
            pre_processed_dag[job_name] = final_needs

    for job_name, needs in pre_processed_dag.items():
        jobs_metadata[job_name]["needs"] = needs

    return jobs_metadata


def traverse_dag_needs(jobs_metadata: Dag) -> None:
    created_jobs = set(jobs_metadata.keys())
    for job, metadata in jobs_metadata.items():
        final_needs: set = deepcopy(metadata["needs"]) & created_jobs
        # Post process jobs that are based on needs field
        partial = True
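        # Fixed-point expansion of transitive needs (illustrative): with direct needs
        # c -> {b} and b -> {a}, the loop below grows c's needs to {a, b}.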
        while partial:
            next_depth: set[str] = {n for dn in final_needs for n in jobs_metadata[dn]["needs"]}
            partial: bool = not final_needs.issuperset(next_depth)
            final_needs = final_needs.union(next_depth)

        jobs_metadata[job]["needs"] = final_needs


def extract_stages_and_job_needs(
    pipeline_jobs: dict[str, Any], pipeline_stages: dict[str, Any]
) -> tuple[StageSeq, Dag]:
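    # Each GraphQL job node is expected to look roughly like this (illustrative names):
    #   {"name": "job1", "stage": {"name": "build"},
    #    "needs": {"edges": [{"node": {"name": "job0"}}]}}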
    jobs_metadata = Dag()
    # Record the stage sequence to post process deps that are not based on needs
    # field, for example: sanity job
    stage_sequence: OrderedDict[str, set[str]] = OrderedDict()
    for stage in pipeline_stages["nodes"]:
        stage_sequence[stage["name"]] = set()

    for job in pipeline_jobs["nodes"]:
        stage_sequence[job["stage"]["name"]].add(job["name"])
        dag_job: DagNode = {
            "name": job["name"],
            "stage": job["stage"]["name"],
            "needs": set([j["node"]["name"] for j in job["needs"]["edges"]]),
        }
        jobs_metadata[job["name"]] = dag_job

    return stage_sequence, jobs_metadata


def create_job_needs_dag(gl_gql: GitlabGQL, params, disable_cache: bool = True) -> Dag:
    """
    This function creates a Directed Acyclic Graph (DAG) to represent a sequence of jobs, where each
    job has a set of jobs that it depends on (its "needs") and belongs to a certain "stage".
    The "name" of the job is used as the key in the dictionary.

    For example, consider the following DAG:

        1. build stage: job1 -> job2 -> job3
        2. test stage: job2 -> job4

    - The job needs for job3 are: job1, job2
    - The job needs for job4 are: job2
    - A job with no explicit needs waits for every job from the previous stages to finish.

    The resulting DAG would look like this:

        dag = {
            "job1": {"needs": set(), "stage": "build", "name": "job1"},
            "job2": {"needs": {"job1"}, "stage": "build", "name": "job2"},
            "job3": {"needs": {"job1", "job2"}, "stage": "build", "name": "job3"},
            "job4": {"needs": {"job2"}, "stage": "test", "name": "job4"},
        }

    To access the job needs, one can do:

        dag["job3"]["needs"]

    This will return the set of jobs that job3 needs: {"job1", "job2"}

    Args:
        gl_gql (GitlabGQL): The `gl_gql` parameter is an instance of the `GitlabGQL` class, which is
            used to make GraphQL queries to the GitLab API.
        params (dict): The `params` parameter is a dictionary that contains the necessary parameters
            for the GraphQL query. It is used to specify the details of the pipeline for which the
            job needs DAG is being created.
            The specific keys and values in the `params` dictionary will depend on
            the requirements of the GraphQL query being executed.
        disable_cache (bool): The `disable_cache` parameter is a boolean that specifies whether the
            query cache should be bypassed, forcing a fresh request to the GitLab API.

    Returns:
        The final DAG (Directed Acyclic Graph) representing the job dependencies sourced from needs
        or stages rule.
    """
    stages_jobs_gql = gl_gql.query(
        "pipeline_details.gql",
        params=params,
        paginated_key_loc=["project", "pipeline", "jobs"],
        disable_cache=disable_cache,
    )
    pipeline_data = stages_jobs_gql["project"]["pipeline"]
    if not pipeline_data:
        raise RuntimeError(f"Could not find any pipelines for {params}")

    stage_sequence, jobs_metadata = extract_stages_and_job_needs(
        pipeline_data["jobs"], pipeline_data["stages"]
    )
    # Fill the DAG with the job needs from stages that don't have any needs but still need to wait
    # for previous stages
    final_dag = insert_early_stage_jobs(stage_sequence, jobs_metadata)
    # Now that each job has its direct needs filled correctly, update the "needs" field for each job
    # in the DAG by performing a topological traversal
    traverse_dag_needs(final_dag)

    return final_dag


def filter_dag(dag: Dag, regex: Pattern) -> Dag:
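    # Keeps only jobs whose name matches the pattern from its start (re.match), e.g.
    # filter_dag(dag, re.compile(r"^debian-")) for every job named "debian-*" (illustrative prefix).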
    jobs_with_regex: set[str] = {job for job in dag if regex.match(job)}
    return Dag({job: data for job, data in dag.items() if job in sorted(jobs_with_regex)})


def print_dag(dag: Dag) -> None:
    for job, data in dag.items():
        print(f"{job}:")
        print(f"\t{' '.join(data['needs'])}")
        print()


def fetch_merged_yaml(gl_gql: GitlabGQL, params) -> dict[str, Any]:
    gitlab_yml_file = get_project_root_dir() / ".gitlab-ci.yml"
    content = Path(gitlab_yml_file).read_text().strip()
    params["content"] = content
    raw_response = gl_gql.query("job_details.gql", params)
    if merged_yaml := raw_response["ciConfig"]["mergedYaml"]:
        return yaml.safe_load(merged_yaml)

    gl_gql.invalidate_query_cache()
    raise ValueError(
        """
        Could not fetch any content for merged YAML,
        please verify if the git SHA exists in remote.
        Maybe you forgot to `git push`? """
    )


def recursive_fill(job, relationship_field, target_data, acc_data: dict, merged_yaml):
    if relatives := job.get(relationship_field):
        if isinstance(relatives, str):
            relatives = [relatives]

        for relative in relatives:
            parent_job = merged_yaml[relative]
            # pass the full argument list so the recursion matches the signature above
            acc_data = recursive_fill(
                parent_job, relationship_field, target_data, acc_data, merged_yaml
            )

    acc_data |= job.get(target_data, {})

    return acc_data


def get_variables(job, merged_yaml, project_path, sha) -> dict[str, str]:
    p = get_project_root_dir() / ".gitlab-ci" / "image-tags.yml"
    image_tags = yaml.safe_load(p.read_text())

    variables = image_tags["variables"]
    variables |= merged_yaml["variables"]
    variables |= job["variables"]
    variables["CI_PROJECT_PATH"] = project_path
    variables["CI_PROJECT_NAME"] = project_path.split("/")[1]
    variables["CI_REGISTRY_IMAGE"] = "registry.freedesktop.org/${CI_PROJECT_PATH}"
    variables["CI_COMMIT_SHA"] = sha

    while recurse_among_variables_space(variables):
        pass

    return variables


# Based on: https://stackoverflow.com/a/2158532/1079223
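# Flattens arbitrarily nested iterables while leaving strings and bytes intact,
# e.g. (illustrative): list(flatten(["a", ["b", ["c"]]])) == ["a", "b", "c"]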
def flatten(xs):
    for x in xs:
        if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
            yield from flatten(x)
        else:
            yield x


def get_full_script(job) -> list[str]:
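    # Returns the before_script/script/after_script parts as one flat, annotated list,
    # e.g. (illustrative values):
    #   ["# before_script", "apt-get update", "",
    #    "# script", "meson setup build", "",
    #    "# after_script", ""]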
    script = []
    for script_part in ("before_script", "script", "after_script"):
        script.append(f"# {script_part}")
        lines = flatten(job.get(script_part, []))
        script.extend(lines)
        script.append("")

    return script
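

# One substitution pass over the variable table; get_variables() calls it repeatedly
# until it reports no further work. Illustrative example (hypothetical values):
#   {"CI_PROJECT_PATH": "mesa/mesa", "IMAGE": "reg/${CI_PROJECT_PATH}"}
#   -> {"CI_PROJECT_PATH": "mesa/mesa", "IMAGE": "reg/mesa/mesa"}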
def recurse_among_variables_space(var_graph) -> bool:
    updated = False
    for var, value in var_graph.items():
        value = str(value)
        dep_vars = []
        if match := re.findall(r"(\$[{]?[\w\d_]*[}]?)", value):
            all_dep_vars = [v.lstrip("${").rstrip("}") for v in match]
            # print(value, match, all_dep_vars)
            dep_vars = [v for v in all_dep_vars if v in var_graph]

        for dep_var in dep_vars:
            dep_value = str(var_graph[dep_var])
            new_value = var_graph[var]
            new_value = new_value.replace(f"${{{dep_var}}}", dep_value)
            new_value = new_value.replace(f"${dep_var}", dep_value)
            var_graph[var] = new_value
            updated |= dep_value != new_value

    return updated


def get_job_final_definition(job_name, merged_yaml, project_path, sha):
    job = merged_yaml[job_name]
    variables = get_variables(job, merged_yaml, project_path, sha)

    print("# --------- variables ---------------")
    for var, value in sorted(variables.items()):
        print(f"export {var}={value!r}")

    # TODO: Recurse into needs to get full script
    # TODO: maybe create an extra yaml file to avoid too much rework
    script = get_full_script(job)
    print()
    print()
    print("# --------- full script ---------------")
    print("\n".join(script))

    if image := variables.get("MESA_IMAGE"):
        print()
        print()
        print("# --------- container image ---------------")
        print(image)


def from_sha_to_pipeline_iid(gl_gql: GitlabGQL, params) -> str:
    result = gl_gql.query("pipeline_utils.gql", params)

    return result["project"]["pipelines"]["nodes"][0]["iid"]


def parse_args() -> Namespace:
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        description="CLI and library with utility functions to debug jobs via Gitlab GraphQL",
        epilog=f"""Example:
        {Path(__file__).name} --rev HEAD --print-dag""",
    )
    parser.add_argument("-pp", "--project-path", type=str, default="mesa/mesa")
    parser.add_argument("--sha", "--rev", type=str, required=True)
    parser.add_argument(
        "--regex",
        type=str,
        required=False,
        help="Regex pattern for the job name to be considered",
    )
    parser.add_argument("--print-dag", action="store_true", help="Print job needs DAG")
    parser.add_argument(
        "--print-merged-yaml",
        action="store_true",
        help="Print the resulting YAML for the specific SHA",
    )
    parser.add_argument(
        "--print-job-manifest", type=str, help="Print the resulting job data"
    )
    parser.add_argument(
        "--gitlab-token-file",
        type=str,
        default=get_token_from_default_dir(),
        help="force GitLab token, otherwise it's read from $XDG_CONFIG_HOME/gitlab-token",
    )

    args = parser.parse_args()
    args.gitlab_token = Path(args.gitlab_token_file).read_text().strip()
    return args


def main():
    args = parse_args()
    gl_gql = GitlabGQL(token=args.gitlab_token)

    sha = check_output(['git', 'rev-parse', args.sha]).decode('ascii').strip()

    args.iid = from_sha_to_pipeline_iid(gl_gql, {"projectPath": args.project_path, "sha": sha})

    if args.print_dag:
        dag = create_job_needs_dag(
            gl_gql, {"projectPath": args.project_path, "iid": args.iid}, disable_cache=True
        )

        if args.regex:
            dag = filter_dag(dag, re.compile(args.regex))
        print_dag(dag)

    if args.print_merged_yaml:
        print(
            fetch_merged_yaml(
                gl_gql, {"projectPath": args.project_path, "sha": sha}
            )
        )

    if args.print_job_manifest:
        merged_yaml = fetch_merged_yaml(
            gl_gql, {"projectPath": args.project_path, "sha": sha}
        )
        get_job_final_definition(
            args.print_job_manifest, merged_yaml, args.project_path, sha
        )


if __name__ == "__main__":
    main()