# Types of CI pipelines:
#
# | pipeline name        | context   | description                                                 |
# |----------------------|-----------|-------------------------------------------------------------|
# | merge pipeline       | mesa/mesa | pipeline running for an MR; if it passes the MR gets merged |
# | pre-merge pipeline   | mesa/mesa | same as above, except its status doesn't affect the MR      |
# | post-merge pipeline  | mesa/mesa | pipeline immediately after merging                          |
# | fork pipeline        | fork      | pipeline running in a user fork                             |
# | scheduled pipeline   | mesa/mesa | nightly pipelines, running every morning at 4am UTC         |
# | direct-push pipeline | mesa/mesa | when commits are pushed directly to mesa/mesa, bypassing Marge and its gating pipeline |
#
# Note that the release branches maintained by the release manager fall under
# the "direct push" category.
#
# "context" indicates the permissions that the jobs get; notably, any
# container created in mesa/mesa gets pushed immediately for everyone to use
# as soon as the image tag change is merged.
#
# Merge pipelines contain all jobs that must pass before the MR can be merged.
# Pre-merge pipelines contain the exact same jobs as merge pipelines.
# Post-merge pipelines contain *only* the `pages` job that deploys the new
# version of the website.
# Fork pipelines contain everything.
# Scheduled pipelines only contain the container+build jobs, and some extra
# test jobs (typically "full" variants of pre-merge jobs that only run 1/X
# test cases), but not a repeat of the merge pipeline jobs.
# Direct-push pipelines contain the same jobs as merge pipelines.
workflow:
  rules:
    # do not duplicate pipelines on merge pipelines
    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
      when: never
    # merge pipeline
    - if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
      variables:
        KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
        MESA_CI_PERFORMANCE_ENABLED: 1
        VALVE_INFRA_VANGOGH_JOB_PRIORITY: ""  # Empty tags are ignored by gitlab
    # post-merge pipeline
    - if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
    # nightly pipeline
    - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
      variables:
        KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
        JOB_PRIORITY: 50
        VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
    # pipeline for direct pushes that bypassed the CI
    - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
      variables:
        KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
        JOB_PRIORITY: 40
        VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
    # pre-merge or fork pipeline
    - if: $FORCE_KERNEL_TAG != null
      variables:
        KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${FORCE_KERNEL_TAG}
        JOB_PRIORITY: 50
        VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
    - if: $FORCE_KERNEL_TAG == null
      variables:
        KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
        JOB_PRIORITY: 50
        VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
variables:
  FDO_UPSTREAM_REPO: mesa/mesa
  MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
  # Runners eval this before cloning; it primes the local git cache from S3 so
  # the subsequent `git fetch` only pulls the delta against upstream.
  CI_PRE_CLONE_SCRIPT: |-
    set -o xtrace
    wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh
    bash download-git-cache.sh
    rm download-git-cache.sh
    set +o xtrace
  S3_JWT_FILE: /s3_jwt
  S3_HOST: s3.freedesktop.org
  # This bucket is used to fetch the kernel image
  S3_KERNEL_BUCKET: mesa-rootfs
  # Bucket for git cache
  S3_GITCACHE_BUCKET: git-cache
  # Bucket for the pipeline artifacts pushed to S3
  S3_ARTIFACTS_BUCKET: artifacts
  # Buckets for traces
  S3_TRACIE_RESULTS_BUCKET: mesa-tracie-results
  S3_TRACIE_PUBLIC_BUCKET: mesa-tracie-public
  S3_TRACIE_PRIVATE_BUCKET: mesa-tracie-private
  # per-pipeline artifact storage on MinIO
  PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
  # per-job artifact storage on MinIO
  JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
  # reference images stored for traces
  PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${S3_HOST}/${S3_TRACIE_RESULTS_BUCKET}/$FDO_UPSTREAM_REPO"
  # For individual CI farm status see .ci-farms folder
  # Disable farm with `git mv .ci-farms{,-disabled}/$farm_name`
  # Re-enable farm with `git mv .ci-farms{-disabled,}/$farm_name`
  # NEVER MIX FARM MAINTENANCE WITH ANY OTHER CHANGE IN THE SAME MERGE REQUEST!
  ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
  # Python scripts for structured logger
  PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
  # Drop once deqp-runner is upgraded to > 0.18.0
  MESA_VK_ABORT_ON_DEVICE_LOSS: 1
  # Avoid the wall of "Unsupported SPIR-V capability" warnings in CI job log, hiding away useful output
  MESA_SPIRV_LOG_LEVEL: error
default:
  # Short-lived JWT for talking to s3.freedesktop.org; written to S3_JWT_FILE
  # in before_script, then unset from the environment.
  id_tokens:
    S3_JWT:
      aud: https://s3.freedesktop.org
  before_script:
    - >
      export SCRIPTS_DIR=$(mktemp -d) &&
      curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh" &&
      . ${SCRIPTS_DIR}/setup-test-env.sh &&
      echo -n "${S3_JWT}" > "${S3_JWT_FILE}" &&
      unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables

  after_script:
    # Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338
    - find -name '*.log' -exec mv {} {}.txt \;

    # Restore the token from disk so after_script steps can still reach S3,
    # then remove the on-disk copy.
    - >
      set +x

      test -e "${S3_JWT_FILE}" &&
      export S3_JWT="$(<${S3_JWT_FILE})" &&
      rm "${S3_JWT_FILE}"

  # Retry when job fails. Failed jobs can be found in the Mesa CI Daily Reports:
  # https://gitlab.freedesktop.org/mesa/mesa/-/issues/?sort=created_date&state=opened&label_name%5B%5D=CI%20daily
  retry:
    max: 1
    # Ignore runner_unsupported, stale_schedule, archived_failure, or
    # unmet_prerequisites
    when:
      - api_failure
      - runner_system_failure
      - script_failure
      - job_execution_timeout
      - scheduler_failure
      - data_integrity_failure
      - unknown_failure
stages:
  - sanity
  - container
  - git-archive
  - build-x86_64
  - build-misc
  - code-validation
  - amd
  - intel
  - nouveau
  - arm
  - broadcom
  - freedreno
  - etnaviv
  - software-renderer
  - layered-backends
  - deploy
include:
  - project: 'freedesktop/ci-templates'
    ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
    file:
      - '/templates/ci-fairy.yml'
  - project: 'freedesktop/ci-templates'
    ref: *ci-templates-commit
    file:
      - '/templates/alpine.yml'
      - '/templates/debian.yml'
      - '/templates/fedora.yml'
  - local: '.gitlab-ci/image-tags.yml'
  - local: '.gitlab-ci/lava/lava-gitlab-ci.yml'
  - local: '.gitlab-ci/container/gitlab-ci.yml'
  - local: '.gitlab-ci/build/gitlab-ci.yml'
  - local: '.gitlab-ci/test/gitlab-ci.yml'
  - local: '.gitlab-ci/farm-rules.yml'
  - local: '.gitlab-ci/test-source-dep.yml'
  - local: 'docs/gitlab-ci.yml'
  - local: 'src/**/ci/gitlab-ci.yml'
# YAML anchors for rule conditions
# --------------------------------
.rules-anchors:
  # Pre-merge pipeline
  - &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
.never-post-merge-rules:
  rules:
    - if: *is-post-merge
      when: never
.container+build-rules:
  rules:
    # Run when re-enabling a disabled farm, but not when disabling it
    - !reference [.disable-farm-mr-rules, rules]
    # Never run immediately after merging, as we just ran everything
    - !reference [.never-post-merge-rules, rules]
    # Build everything in merge pipelines, if any files affecting the pipeline
    # were changed
    - if: *is-merge-attempt
      changes: &all_paths
        - VERSION
        - bin/git_sha1_gen.py
        - bin/install_megadrivers.py
        - bin/symbols-check.py
        # GitLab CI
        - .gitlab-ci.yml
        - .gitlab-ci/**/*
        - .ci-farms/*
        # Meson
        - meson*
        - build-support/**/*
        - subprojects/**/*
        # clang format
        - .clang-format
        - .clang-format-include
        - .clang-format-ignore
        # Source code
        - include/**/*
        - src/**/*
      when: on_success
    # Same as above, but for pre-merge pipelines
    - if: *is-pre-merge
      changes:
        *all_paths
      when: manual
    # Skip everything for pre-merge and merge pipelines which don't change
    # anything in the build
    - if: *is-merge-attempt
      when: never
    - if: *is-pre-merge
      when: never
    # Build everything after someone bypassed the CI
    - if: *is-direct-push
      when: on_success
    # Build everything in scheduled pipelines
    - if: *is-scheduled-pipeline
      when: on_success
    # Allow building everything in fork pipelines, but build nothing unless
    # manually triggered
    - when: manual
.ci-deqp-artifacts:
  artifacts:
    name: "mesa_${CI_JOB_NAME}"
    when: always
    untracked: false
    paths:
      # Watch out! Artifacts are relative to the build dir.
      # https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521
      - artifacts
      - _build/meson-logs/*.txt
      - _build/meson-logs/strace
# Git archive

make git archive:
  extends:
    - .fdo.ci-fairy
  stage: git-archive
  rules:
    - !reference [.scheduled_pipeline-rules, rules]
  # ensure we are running on packet
  tags:
    - packet.net
  script:
    # Compactify the .git directory
    - git gc --aggressive
    # Download & cache the perfetto subproject as well.
    - rm -rf subprojects/perfetto ; mkdir -p subprojects/perfetto && curl https://android.googlesource.com/platform/external/perfetto/+archive/$(grep 'revision =' subprojects/perfetto.wrap | cut -d ' ' -f3).tar.gz | tar zxf - -C subprojects/perfetto
    # compress the current folder
    - tar -cvzf ../$CI_PROJECT_NAME.tar.gz .

    # Upload the compressed tree to the git-cache bucket so runners can prime
    # their local clones from it instead of pulling the full repo.
    - ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
# Sanity checks of MR settings and commit logs
sanity:
  extends:
    - .fdo.ci-fairy
  stage: sanity
  rules:
    - if: *is-pre-merge
      when: on_success
    - when: never
  variables:
    GIT_STRATEGY: none
  script:
    # ci-fairy check-commits --junit-xml=check-commits.xml
    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
    # Reject image tags longer than 20 characters; they are combined into
    # container image names with limited length.
    - |
      set -eu
      image_tags=(
        DEBIAN_BASE_TAG
        DEBIAN_BUILD_TAG
        DEBIAN_X86_64_TEST_ANDROID_TAG
        DEBIAN_X86_64_TEST_GL_TAG
        DEBIAN_X86_64_TEST_VK_TAG
        ALPINE_X86_64_BUILD_TAG
        ALPINE_X86_64_LAVA_SSH_TAG
        FEDORA_X86_64_BUILD_TAG
        KERNEL_ROOTFS_TAG
        KERNEL_TAG
        PKG_REPO_REV
        WINDOWS_X64_MSVC_TAG
        WINDOWS_X64_BUILD_TAG
        WINDOWS_X64_TEST_TAG
      )
      for var in "${image_tags[@]}"
      do
        if [ "$(echo -n "${!var}" | wc -c)" -gt 20 ]
        then
          echo "$var is too long; please make sure it is at most 20 chars."
          exit 1
        fi
      done
  artifacts:
    when: on_failure
    reports:
      junit: check-*.xml
  tags:
    - placeholder-job
mr-label-maker-test:
  extends:
    - .fdo.ci-fairy
  stage: sanity
  rules:
    - !reference [.mr-label-maker-rules, rules]
  variables:
    GIT_STRATEGY: fetch
  timeout: 10m
  script:
    - set -eu
    - python3 -m venv .venv
    - source .venv/bin/activate
    - pip install git+https://gitlab.freedesktop.org/freedesktop/mr-label-maker
    - mr-label-maker --dry-run --mr $CI_MERGE_REQUEST_IID
# Jobs that need to pass before spending hardware resources on further testing
.required-for-hardware-jobs:
  needs:
    - job: clang-format
      optional: true
    - job: rustfmt
      optional: true