mesa/.gitlab-ci.yml

variables:
  UPSTREAM_REPO: mesa/mesa

include:
  - project: 'wayland/ci-templates'
    # Must be the same as in .gitlab-ci/lava-gitlab-ci.yml
    ref: 0a9bdd33a98f05af6761ab118b5074952242aab0
    file: '/templates/debian.yml'
  - local: '.gitlab-ci/lava-gitlab-ci.yml'

stages:
  - container
  - build
  - test

# When to automatically run the CI
.ci-run-policy:
  rules:
    - when: on_success
  retry:
    max: 2
    when:
      - runner_system_failure
  # Cancel CI run if a newer commit is pushed to the same branch
  interruptible: true

.ci-deqp-artifacts:
  artifacts:
    when: always
    untracked: false
    paths:
      # Watch out! Artifacts are relative to the build dir.
      # https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521
      - artifacts

# Build the "normal" (non-LAVA) CI docker images.
#
# DEBIAN_TAG is the tag of the docker image used by later stage jobs. If the
# image doesn't exist yet, the container stage job generates it.
#
# In order to generate a new image, one should generally change the tag.
# While removing the image from the registry would also work, that's not
# recommended except for ephemeral images during development: Replacing
# an image after a significant amount of time might pull in newer
# versions of gcc/clang or other packages, which might break the build
# with older commits using the same tag.
#
# After merging a change resulting in generating a new image to the
# main repository, it's recommended to remove the image from the source
# repository's container registry, so that the image from the main
# repository's registry will be used there as well.
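#
# For example, to pick up newer Debian packages in the x86 build image one
# would bump the date-based DEBIAN_TAG under x86_build below; the YAML
# anchor/alias pairs (&x86_build / *x86_build) propagate the new tag to the
# jobs that consume the image, so it only needs changing in one place.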
.container:
  stage: container
  extends:
    - .ci-run-policy
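  # Note: rules are evaluated in order and the first match wins, so the final
  # catch-all only makes jobs manual for pipelines that are neither merge
  # requests touching the listed paths nor runs in the main mesa/mesa project.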
  rules:
    # Run pipeline by default for merge requests changing files affecting it
    - if: '$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == $CI_COMMIT_REF_NAME'
      changes:
        - VERSION
        - bin/**/*
        # GitLab CI
        - .gitlab-ci.yml
        - .gitlab-ci/**/*
        # Meson
        - meson*
        - build-support/**/*
        - subprojects/**/*
        # SCons
        - SConstruct
        - scons/**/*
        - common.py
        # Source code
        - include/**/*
        - src/**/*
      when: on_success
    # Always run pipeline by default in the main project
    - if: '$CI_PROJECT_PATH == "mesa/mesa"'
      when: on_success
    # Otherwise, allow triggering jobs manually
    - when: manual
  variables:
    DEBIAN_VERSION: buster-slim
    REPO_SUFFIX: $CI_JOB_NAME
    DEBIAN_EXEC: 'bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    # no need to pull the whole repo to build the container image
    GIT_STRATEGY: none

# Debian 10 based x86 build image
x86_build:
  extends:
    - .debian@container-ifnot-exists
    - .container
  variables:
    DEBIAN_TAG: &x86_build "2019-11-13"

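# Jobs that extend one of the .use-* templates run inside the image built by
# the corresponding container job above; the needs: entry makes them wait for
# that container job (and only that job) before starting.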
.use-x86_build:
  variables:
    TAG: *x86_build
  image: "$CI_REGISTRY_IMAGE/debian/x86_build:$TAG"
  needs:
    - x86_build

# Debian 10 based x86 test image for GL
x86_test-gl:
  extends: x86_build
  variables:
    DEBIAN_TAG: &x86_test-gl "2019-12-18"

# Debian 10 based x86 test image for VK
x86_test-vk:
  extends: x86_build
  variables:
    DEBIAN_TAG: &x86_test-vk "2019-12-18"
  # Can only be triggered manually on personal branches because RADV is the only
  # driver that does Vulkan testing at the moment.
  rules:
    # Never build the test image for VK by default in the main project.
    - if: '$CI_PROJECT_PATH == "mesa/mesa"'
      when: never
    # Never build the test image for VK by default for merge requests.
    - if: '$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == $CI_COMMIT_REF_NAME'
      when: never
    # Otherwise, allow building it manually for personal branches.
    - when: manual

# Debian 9 based x86 build image (old LLVM)
x86_build_old:
  extends: x86_build
  variables:
    DEBIAN_TAG: &x86_build_old "2019-09-18"
    DEBIAN_VERSION: stretch-slim

.use-x86_build_old:
  variables:
    TAG: *x86_build_old
  image: "$CI_REGISTRY_IMAGE/debian/x86_build_old:$TAG"
  needs:
    - x86_build_old

# Debian 10 based ARM build image
arm_build:
  extends:
    - .debian@container-ifnot-exists@arm64v8
    - .container
  variables:
    DEBIAN_TAG: &arm_build "2019-11-13"

.use-arm_build:
  variables:
    TAG: *arm_build
  image: "$CI_REGISTRY_IMAGE/debian/arm_build:$TAG"
  needs:
    - arm_build

# Debian 10 based ARM test image
arm_test:
  extends: arm_build
  variables:
    DEBIAN_TAG: &arm_test "2019-12-18"

.use-arm_test:
  variables:
    TAG: *arm_test
  image: "$CI_REGISTRY_IMAGE/debian/arm_test:$TAG"
  needs:
    - meson-arm64
    - arm_test

# BUILD
# Shared between windows and Linux
.build-common:
  extends: .ci-run-policy
  stage: build
  artifacts:
    when: always
    paths:
      - _build/meson-logs/*.txt
      # scons:
      - build/*/config.log
      - shader-db

# Just Linux
.build-linux:
  extends: .build-common
  variables:
    CCACHE_COMPILERCHECK: "content"
    CCACHE_COMPRESS: "true"
    CCACHE_DIR: /cache/mesa/ccache
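    # "content" hashes the compiler binary itself rather than trusting its
    # mtime, so the cache stays usable across container image rebuilds.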
  # Use ccache transparently, and print stats before/after
  before_script:
    - export PATH="/usr/lib/ccache:$PATH"
    - export CCACHE_BASEDIR="$PWD"
    - ccache --show-stats
  after_script:
    - ccache --show-stats

.build-windows:
  extends: .build-common
  tags:
    - mesa-windows
  cache:
    key: ${CI_JOB_NAME}
    paths:
      - subprojects/packagecache

.meson-build:
  extends:
    - .build-linux
    - .use-x86_build
  script:
    - .gitlab-ci/meson-build.sh

.scons-build:
  extends:
    - .build-linux
    - .use-x86_build
  variables:
    SCONSFLAGS: "-j4"
  script:
    - .gitlab-ci/scons-build.sh

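# The concrete build jobs below mostly just set variables; the wrapper scripts
# (.gitlab-ci/meson-build.sh and .gitlab-ci/scons-build.sh) pick options such
# as UNWIND, DRI_LOADERS, GALLIUM_ST, GALLIUM_DRIVERS, VULKAN_DRIVERS,
# EXTRA_OPTION and LLVM_VERSION up from the environment when assembling the
# actual meson/scons invocations.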
meson-testing:
  extends:
    - .meson-build
    - .ci-deqp-artifacts
  variables:
    UNWIND: "true"
    DRI_LOADERS: >
      -D glx=dri
      -D gbm=true
      -D egl=true
      -D platforms=x11,drm,surfaceless
    GALLIUM_ST: >
      -D dri3=true
    GALLIUM_DRIVERS: "swrast"
    VULKAN_DRIVERS: amd
    BUILDTYPE: "debugoptimized"
  script:
    - .gitlab-ci/meson-build.sh
    - .gitlab-ci/prepare-artifacts.sh

meson-main:
  extends: .meson-build
  variables:
    UNWIND: "true"
    DRI_LOADERS: >
      -D glx=dri
      -D gbm=true
      -D egl=true
      -D platforms=x11,wayland,drm,surfaceless
    DRI_DRIVERS: "i915,i965,r100,r200,nouveau"
    GALLIUM_ST: >
      -D dri3=true
      -D gallium-extra-hud=true
      -D gallium-vdpau=true
      -D gallium-xvmc=true
      -D gallium-omx=bellagio
      -D gallium-va=true
      -D gallium-xa=true
      -D gallium-nine=true
      -D gallium-opencl=disabled
    GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swr,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink"
    LLVM_VERSION: "7"
    EXTRA_OPTION: >
      -D osmesa=gallium
      -D tools=all
  script:
    - .gitlab-ci/meson-build.sh
    - .gitlab-ci/run-shader-db.sh

.meson-cross:
  extends:
    - .meson-build
  variables:
    UNWIND: "false"
    DRI_LOADERS: >
      -D glx=disabled
      -D gbm=false
      -D egl=true
      -D platforms=surfaceless
      -D osmesa=none
    GALLIUM_ST: >
      -D dri3=false
      -D gallium-vdpau=false
      -D gallium-xvmc=false
      -D gallium-omx=disabled
      -D gallium-va=false
      -D gallium-xa=false
      -D gallium-nine=false

.meson-arm:
  extends:
    - .meson-cross
    - .use-arm_build
  variables:
    VULKAN_DRIVERS: freedreno
    GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4"
    EXTRA_OPTION: >
      -D I-love-half-baked-turnips=true
  tags:
    - aarch64

meson-armhf:
  extends: .meson-arm
  variables:
    CROSS: armhf
    LLVM_VERSION: "7"

meson-arm64:
  extends:
    - .meson-arm
    - .ci-deqp-artifacts
  variables:
    BUILDTYPE: "debugoptimized"
    VULKAN_DRIVERS: "freedreno,amd"
  script:
    - .gitlab-ci/meson-build.sh
    - .gitlab-ci/prepare-artifacts.sh

meson-clang:
  extends: .meson-build
  variables:
    UNWIND: "true"
    DRI_LOADERS: >
      -D glvnd=true
    DRI_DRIVERS: "auto"
    GALLIUM_DRIVERS: "auto"
    VULKAN_DRIVERS: intel,amd,freedreno
    CC: "ccache clang-8"
    CXX: "ccache clang++-8"

.meson-windows:
  extends:
    - .build-windows
  before_script:
    - $ENV:ARCH = "x86"
    - $ENV:VERSION = "2019\Community"
  script:
    - cmd /C .gitlab-ci\meson-build.bat

scons-swr:
  extends: .scons-build
  variables:
    SCONS_TARGET: "swr=1"
    SCONS_CHECK_COMMAND: "true"
    LLVM_VERSION: "6.0"

scons-win64:
  extends: .scons-build
  variables:
    SCONS_TARGET: platform=windows machine=x86_64
    SCONS_CHECK_COMMAND: "true"

meson-clover:
  extends: .meson-build
  variables:
    UNWIND: "true"
    DRI_LOADERS: >
      -D glx=disabled
      -D egl=false
      -D gbm=false
    GALLIUM_ST: >
      -D dri3=false
      -D gallium-vdpau=false
      -D gallium-xvmc=false
      -D gallium-omx=disabled
      -D gallium-va=false
      -D gallium-xa=false
      -D gallium-nine=false
      -D gallium-opencl=icd
  script:
    - export GALLIUM_DRIVERS="r600,radeonsi"
    - .gitlab-ci/meson-build.sh
    - LLVM_VERSION=8 .gitlab-ci/meson-build.sh
    - export GALLIUM_DRIVERS="i915,r600"
    - LLVM_VERSION=6.0 .gitlab-ci/meson-build.sh
    - LLVM_VERSION=7 .gitlab-ci/meson-build.sh

meson-clover-old-llvm:
  extends:
    - meson-clover
    - .use-x86_build_old
  variables:
    UNWIND: "false"
    DRI_LOADERS: >
      -D glx=disabled
      -D egl=false
      -D gbm=false
      -D platforms=drm,surfaceless
    GALLIUM_DRIVERS: "i915,r600"
  script:
    - LLVM_VERSION=3.9 .gitlab-ci/meson-build.sh
    - LLVM_VERSION=4.0 .gitlab-ci/meson-build.sh
    - LLVM_VERSION=5.0 .gitlab-ci/meson-build.sh

meson-vulkan:
  extends: .meson-build
  variables:
    UNWIND: "false"
    DRI_LOADERS: >
      -D glx=disabled
      -D gbm=false
      -D egl=false
      -D platforms=x11,wayland,drm
      -D osmesa=none
    GALLIUM_ST: >
      -D dri3=true
      -D gallium-vdpau=false
      -D gallium-xvmc=false
      -D gallium-omx=disabled
      -D gallium-va=false
      -D gallium-xa=false
      -D gallium-nine=false
      -D gallium-opencl=disabled
      -D b_sanitize=undefined
      -D c_args=-fno-sanitize-recover=all
      -D cpp_args=-fno-sanitize-recover=all
    UBSAN_OPTIONS: "print_stacktrace=1"
    VULKAN_DRIVERS: intel,amd,freedreno
    LLVM_VERSION: "8"
    EXTRA_OPTION: >
      -D vulkan-overlay-layer=true

# While the main point of this build is testing the i386 cross build,
# we also use this one to test some other options that are exclusive
# with meson-main's choices (classic swrast and osmesa)
meson-i386:
  extends: .meson-cross
  variables:
    CROSS: i386
    VULKAN_DRIVERS: intel
    DRI_DRIVERS: "swrast"
    GALLIUM_DRIVERS: "iris"
    EXTRA_OPTION: >
      -D vulkan-overlay-layer=true
      -D llvm=false
      -D osmesa=classic

meson-mingw32-x86_64:
  extends: .meson-build
  variables:
    UNWIND: "false"
    DRI_DRIVERS: ""
    GALLIUM_DRIVERS: "swrast"
    EXTRA_OPTION: >
      -Dllvm=false
      -Dosmesa=gallium
      --cross-file=.gitlab-ci/x86_64-w64-mingw32

scons:
  extends: .scons-build
  variables:
    SCONS_TARGET: "llvm=1"
    SCONS_CHECK_COMMAND: "scons llvm=1 force_scons=1 check"
  script:
    - SCONS_TARGET="" SCONS_CHECK_COMMAND="scons check force_scons=1" .gitlab-ci/scons-build.sh
    - LLVM_VERSION=8 .gitlab-ci/scons-build.sh

scons-old-llvm:
  extends:
    - scons
    - .use-x86_build_old
  script:
    - LLVM_VERSION=3.9 .gitlab-ci/scons-build.sh

.test:
  extends:
    - .ci-run-policy
  stage: test
  variables:
    GIT_STRATEGY: none # testing doesn't build anything from source
  before_script:
    # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
    - rm -rf install
    - tar -xf artifacts/install.tar
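    # List each installed library and its resolved dynamic dependencies as a
    # quick sanity check of the unpacked artifacts.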
    - LD_LIBRARY_PATH=install/lib find install/lib -name "*.so" -print -exec ldd {} \;
  artifacts:
    when: always
    name: "$CI_JOB_NAME-$CI_COMMIT_REF_NAME"
    paths:
      - results/
  dependencies:
    - meson-testing

.test-gl:
  extends:
    - .test
  variables:
    TAG: *x86_test-gl
  image: "$CI_REGISTRY_IMAGE/debian/x86_test-gl:$TAG"
  needs:
    - meson-testing
    - x86_test-gl

.test-vk:
  extends:
    - .test
  variables:
    TAG: *x86_test-vk
  image: "$CI_REGISTRY_IMAGE/debian/x86_test-vk:$TAG"
  needs:
    - meson-testing
    - x86_test-vk

.piglit-test:
  extends: .test-gl
  artifacts:
    when: on_failure
    name: "$CI_JOB_NAME-$CI_COMMIT_REF_NAME"
    paths:
      - summary/
  variables:
    LIBGL_ALWAYS_SOFTWARE: 1
    PIGLIT_NO_WINDOW: 1
  script:
    - artifacts/piglit/run.sh

piglit-quick_gl:
  extends: .piglit-test
  variables:
    LP_NUM_THREADS: 0
    NIR_VALIDATE: 0
    PIGLIT_OPTIONS: >
      --process-isolation false
      -x arb_gpu_shader5
      -x egl_ext_device_
      -x egl_ext_platform_device
      -x glx-multithread-clearbuffer
      -x glx-multithread-shader-compile
      -x max-texture-size
      -x maxsize
    PIGLIT_PROFILES: quick_gl

piglit-glslparser:
  extends: .piglit-test
  variables:
    LP_NUM_THREADS: 0
    NIR_VALIDATE: 0
    PIGLIT_PROFILES: glslparser

piglit-quick_shader:
  extends: .piglit-test
  variables:
    LP_NUM_THREADS: 1
    NIR_VALIDATE: 0
    PIGLIT_PROFILES: quick_shader

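# dEQP runs go through artifacts/deqp-runner.sh, which applies the skip list
# (DEQP_SKIPS) and the expected-failures list (DEQP_EXPECTED_FAILS) and runs
# up to DEQP_PARALLEL dEQP instances at once.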
.deqp-test:
  variables:
    DEQP_SKIPS: deqp-default-skips.txt
  script:
    - ./artifacts/deqp-runner.sh

.deqp-test-gl:
  extends:
    - .test-gl
    - .deqp-test

.deqp-test-vk:
  extends:
    - .test-vk
    - .deqp-test
  variables:
    DEQP_VER: vk

test-llvmpipe-gles2:
  variables:
    DEQP_VER: gles2
    DEQP_PARALLEL: 4
    NIR_VALIDATE: 0
    # Don't use threads inside llvmpipe, we've already got all 4 cores
    # busy with DEQP_PARALLEL.
    LP_NUM_THREADS: 0
    DEQP_EXPECTED_FAILS: deqp-llvmpipe-fails.txt
    LIBGL_ALWAYS_SOFTWARE: "true"
  extends: .deqp-test-gl

test-softpipe-gles2:
  extends: test-llvmpipe-gles2
  variables:
    DEQP_EXPECTED_FAILS: deqp-softpipe-fails.txt
    DEQP_SKIPS: deqp-softpipe-skips.txt
    GALLIUM_DRIVER: "softpipe"

test-softpipe-gles3:
  parallel: 2
  variables:
    DEQP_VER: gles3
  extends: test-softpipe-gles2

test-softpipe-gles31:
  parallel: 4
  variables:
    DEQP_VER: gles31
  extends: test-softpipe-gles2

arm64_a630_gles2:
  extends:
    - .deqp-test-gl
    - .use-arm_test
  variables:
    DEQP_VER: gles2
    DEQP_EXPECTED_FAILS: deqp-freedreno-a630-fails.txt
    DEQP_SKIPS: deqp-freedreno-a630-skips.txt
    NIR_VALIDATE: 0
    DEQP_PARALLEL: 4
    FLAKES_CHANNEL: "#freedreno-ci"
  tags:
    - mesa-cheza
  dependencies:
    - meson-arm64

arm64_a630_gles31:
  extends: arm64_a630_gles2
  variables:
    DEQP_VER: gles31

arm64_a630_gles3:
  extends: arm64_a630_gles2
  variables:
    DEQP_VER: gles3

arm64_a306_gles2:
  extends: arm64_a630_gles2
  variables:
    DEQP_EXPECTED_FAILS: deqp-freedreno-a307-fails.txt
    DEQP_SKIPS: deqp-default-skips.txt
  tags:
    - db410c

# RADV CI
.test-radv:
  variables:
    VK_DRIVER: radeon
    RADV_DEBUG: checkir
  # Can only be triggered manually on personal branches because RADV is the only
  # driver that does Vulkan testing at the moment.
  rules:
    # Never test RADV by default in the main project.
    - if: '$CI_PROJECT_PATH == "mesa/mesa"'
      when: never
    # Never test RADV by default for merge requests.
    - if: '$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == $CI_COMMIT_REF_NAME'
      when: never
    # Otherwise, allow testing RADV if the test image for VK has been manually
    # started.
    - when: on_success

radv_polaris10_vkcts:
  extends:
    - .deqp-test-vk
    - .test-radv
  variables:
    DEQP_PARALLEL: 4
    DEQP_SKIPS: deqp-radv-polaris10-skips.txt
  tags:
    - polaris10