Compare commits

v2.0.4..bl-v1.0.y

No commits in common. "v2.0.4" and "bl-v1.0.y" have entirely different histories.

6332 changed files with 496912 additions and 1725328 deletions
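
The two refs above can be compared locally with plain git; a minimal sketch in Python (assuming v2.0.4 and bl-v1.0.y have both been fetched into one clone):

# a minimal sketch, assuming both refs exist in the local clone
from subprocess import check_output

stat = check_output(["git", "diff", "--shortstat", "v2.0.4", "bl-v1.0.y"]).decode()
print(stat)  # e.g. "6332 files changed, 496912 insertions(+), 1725328 deletions(-)"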

@@ -40,11 +40,11 @@ meson.build @dbaker @eric
##########
# NIR
/src/compiler/nir/ @gfxstrand
/src/compiler/nir/ @jekstrand
# Vulkan
/src/vulkan/ @eric @gfxstrand
/include/vulkan/ @eric @gfxstrand
/src/vulkan/ @eric @jekstrand
/include/vulkan/ @eric @jekstrand
#############
@@ -79,9 +79,12 @@ meson.build @dbaker @eric
/src/glx/*glvnd* @kbrenneman
# Haiku
/include/HaikuGL/ @kallisti5
/src/egl/drivers/haiku/ @kallisti5
/src/gallium/frontends/hgl/ @kallisti5
/src/gallium/targets/haiku-softpipe/ @kallisti5
/src/gallium/winsys/sw/hgl/ @kallisti5
/src/hgl/ @kallisti5
# Loader - DRI/classic
/src/loader/ @xexaxo
@@ -120,16 +123,16 @@ meson.build @dbaker @eric
/src/gallium/drivers/freedreno/ @robclark
# Imagination
/include/drm-uapi/pvr_drm.h @CreativeCylon @frankbinns @MTCoster
/src/imagination/ @CreativeCylon @frankbinns @MTCoster
/include/drm-uapi/pvr_drm.h @CreativeCylon @frankbinns
/src/imagination/ @CreativeCylon @frankbinns
/src/imagination/rogue/ @simon-perretta-img
# Intel
/include/drm-uapi/i915_drm.h @kwg @llandwerlin @gfxstrand @idr
/include/pci_ids/i*_pci_ids.h @kwg @llandwerlin @gfxstrand @idr
/src/intel/ @kwg @llandwerlin @gfxstrand @idr
/src/gallium/winsys/iris/ @kwg @llandwerlin @gfxstrand @idr
/src/gallium/drivers/iris/ @kwg @llandwerlin @gfxstrand @idr
/include/drm-uapi/i915_drm.h @kwg @llandwerlin @jekstrand @idr
/include/pci_ids/i*_pci_ids.h @kwg @llandwerlin @jekstrand @idr
/src/intel/ @kwg @llandwerlin @jekstrand @idr
/src/gallium/winsys/iris/ @kwg @llandwerlin @jekstrand @idr
/src/gallium/drivers/iris/ @kwg @llandwerlin @jekstrand @idr
/src/gallium/drivers/i915/ @anholt
# Microsoft
@@ -137,16 +140,9 @@ meson.build @dbaker @eric
/src/gallium/drivers/d3d12/ @jenatali
# Panfrost
/src/panfrost/ @bbrezillon
/src/panfrost/midgard @italove
/src/gallium/drivers/panfrost/ @bbrezillon
# R300
/src/gallium/drivers/r300/ @ondracka @gawin
# VirGL - Video
/src/gallium/drivers/virgl/virgl_video.* @flynnjiang
/src/virtio/virtio-gpu/virgl_video_hw.h @flynnjiang
/src/panfrost/ @alyssa
/src/panfrost/vulkan/ @bbrezillon
/src/gallium/drivers/panfrost/ @alyssa
# VMware
/src/gallium/drivers/svga/ @brianp @charmainel

@@ -1 +1 @@
24.0.1
22.3.5

@@ -34,15 +34,15 @@ MESA_VK_LIB_SUFFIX_intel_hasvk := intel_hasvk
MESA_VK_LIB_SUFFIX_freedreno := freedreno
MESA_VK_LIB_SUFFIX_broadcom := broadcom
MESA_VK_LIB_SUFFIX_panfrost := panfrost
MESA_VK_LIB_SUFFIX_virtio := virtio
MESA_VK_LIB_SUFFIX_virtio-experimental := virtio
MESA_VK_LIB_SUFFIX_swrast := lvp
include $(CLEAR_VARS)
LOCAL_SHARED_LIBRARIES := libc libdl libdrm libm liblog libcutils libz libc++ libnativewindow libsync libhardware
LOCAL_STATIC_LIBRARIES := libexpat libarect libelf
LOCAL_HEADER_LIBRARIES := libnativebase_headers hwvulkan_headers
MESON_GEN_PKGCONFIGS := cutils expat hardware libdrm:$(LIBDRM_VERSION) nativewindow sync zlib:1.2.11 libelf
LOCAL_HEADER_LIBRARIES := libnativebase_headers hwvulkan_headers libbacktrace_headers
MESON_GEN_PKGCONFIGS := backtrace cutils expat hardware libdrm:$(LIBDRM_VERSION) nativewindow sync zlib:1.2.11 libelf
LOCAL_CFLAGS += $(BOARD_MESA3D_CFLAGS)
ifneq ($(filter swrast,$(BOARD_MESA3D_GALLIUM_DRIVERS) $(BOARD_MESA3D_VULKAN_DRIVERS)),)
@@ -61,15 +61,9 @@ LOCAL_SHARED_LIBRARIES += libdrm_intel
MESON_GEN_PKGCONFIGS += libdrm_intel:$(LIBDRM_VERSION)
endif
ifneq ($(filter radeonsi,$(BOARD_MESA3D_GALLIUM_DRIVERS)),)
ifneq ($(MESON_GEN_LLVM_STUB),)
LOCAL_CFLAGS += -DFORCE_BUILD_AMDGPU # instructs LLVM to declare LLVMInitializeAMDGPU* functions
# The flag is required for the Android-x86 LLVM port that follows the AOSP LLVM porting rules
# https://osdn.net/projects/android-x86/scm/git/external-llvm-project
endif
endif
ifneq ($(filter radeonsi amd,$(BOARD_MESA3D_GALLIUM_DRIVERS) $(BOARD_MESA3D_VULKAN_DRIVERS)),)
MESON_GEN_LLVM_STUB := true
LOCAL_CFLAGS += -DFORCE_BUILD_AMDGPU # instructs LLVM to declare LLVMInitializeAMDGPU* functions
LOCAL_SHARED_LIBRARIES += libdrm_amdgpu
MESON_GEN_PKGCONFIGS += libdrm_amdgpu:$(LIBDRM_VERSION)
endif
@@ -164,7 +158,6 @@ include $(BUILD_PREBUILT)
endif
endef
ifneq ($(strip $(BOARD_MESA3D_GALLIUM_DRIVERS)),)
# Module 'libgallium_dri', produces '/vendor/lib{64}/dri/libgallium_dri.so'
# This module also trigger DRI symlinks creation process
$(eval $(call mesa3d-lib,libgallium_dri,.so.0,dri,MESA3D_GALLIUM_DRI_BIN))
@@ -177,7 +170,6 @@ $(eval $(call mesa3d-lib,libEGL_mesa,.so.1,egl,MESA3D_LIBEGL_BIN))
$(eval $(call mesa3d-lib,libGLESv1_CM_mesa,.so.1,egl,MESA3D_LIBGLESV1_BIN))
# Module 'libGLESv2_mesa', produces '/vendor/lib{64}/egl/libGLESv2_mesa.so'
$(eval $(call mesa3d-lib,libGLESv2_mesa,.so.2,egl,MESA3D_LIBGLESV2_BIN))
endif
# Modules 'vulkan.{driver_name}', produces '/vendor/lib{64}/hw/vulkan.{driver_name}.so' HAL
$(foreach driver,$(BOARD_MESA3D_VULKAN_DRIVERS), \

@@ -88,12 +88,9 @@ MESON_GEN_NINJA := \
-Dgallium-drivers=$(subst $(space),$(comma),$(BOARD_MESA3D_GALLIUM_DRIVERS)) \
-Dvulkan-drivers=$(subst $(space),$(comma),$(subst radeon,amd,$(BOARD_MESA3D_VULKAN_DRIVERS))) \
-Dgbm=enabled \
-Degl=$(if $(BOARD_MESA3D_GALLIUM_DRIVERS),enabled,disabled) \
-Dllvm=$(if $(MESON_GEN_LLVM_STUB),enabled,disabled) \
-Degl=enabled \
-Dcpp_rtti=false \
-Dlmsensors=disabled \
-Dandroid-libbacktrace=disabled \
$(BOARD_MESA3D_MESON_ARGS) \
MESON_BUILD := PATH=/usr/bin:/bin:/sbin:$$PATH ninja -C $(MESON_OUT_DIR)/build
@@ -205,9 +202,7 @@ define m-c-flags
endef
define filter-c-flags
$(filter-out -std=gnu++17 -std=gnu++14 -std=gnu99 -fno-rtti \
-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang \
-ftrivial-auto-var-init=zero,
$(filter-out -std=gnu++17 -std=gnu++14 -std=gnu99 -fno-rtti, \
$(patsubst -W%,, $1))
endef
@@ -293,7 +288,7 @@ $(MESON_OUT_DIR)/install/.install.timestamp: $(MESON_OUT_DIR)/.build.timestamp
rm -rf $(dir $@)
mkdir -p $(dir $@)
DESTDIR=$(call relative-to-absolute,$(dir $@)) $(MESON_BUILD) install
$(if $(BOARD_MESA3D_GALLIUM_DRIVERS),$(MESON_COPY_LIBGALLIUM))
$(MESON_COPY_LIBGALLIUM)
touch $@
$($(M_TARGET_PREFIX)MESA3D_LIBGBM_BIN) $(MESA3D_GLES_BINS): $(MESON_OUT_DIR)/install/.install.timestamp

bin/ci/.gitignore vendored
@@ -1,2 +0,0 @@
schema.graphql
gitlab_gql.py.cache*

@@ -1,413 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2020 - 2022 Collabora Ltd.
# Authors:
# Tomeu Vizoso <tomeu.vizoso@collabora.com>
# David Heidelberg <david.heidelberg@collabora.com>
#
# For the dependencies, see the requirements.txt
# SPDX-License-Identifier: MIT
"""
Helper script to restrict running only required CI jobs
and show the job(s) logs.
"""
import argparse
import re
import sys
import time
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from itertools import chain
from subprocess import check_output
from typing import TYPE_CHECKING, Iterable, Literal, Optional
import gitlab
from colorama import Fore, Style
from gitlab_common import (
get_gitlab_project,
read_token,
wait_for_pipeline,
pretty_duration,
)
from gitlab_gql import GitlabGQL, create_job_needs_dag, filter_dag, print_dag
if TYPE_CHECKING:
from gitlab_gql import Dag
GITLAB_URL = "https://gitlab.freedesktop.org"
REFRESH_WAIT_LOG = 10
REFRESH_WAIT_JOBS = 6
URL_START = "\033]8;;"
URL_END = "\033]8;;\a"
STATUS_COLORS = {
"created": "",
"running": Fore.BLUE,
"success": Fore.GREEN,
"failed": Fore.RED,
"canceled": Fore.MAGENTA,
"manual": "",
"pending": "",
"skipped": "",
}
COMPLETED_STATUSES = ["success", "failed"]
def print_job_status(job, new_status=False) -> None:
"""It prints a nice, colored job status with a link to the job."""
if job.status == "canceled":
return
if job.duration:
duration = job.duration
elif job.started_at:
duration = time.perf_counter() - time.mktime(job.started_at.timetuple())
print(
STATUS_COLORS[job.status]
+ "🞋 job "
+ URL_START
+ f"{job.web_url}\a{job.name}"
+ URL_END
+ (f" has new status: {job.status}" if new_status else f" :: {job.status}")
+ (f" ({pretty_duration(duration)})" if job.started_at else "")
+ Style.RESET_ALL
)
def pretty_wait(sec: int) -> None:
"""shows progressbar in dots"""
for val in range(sec, 0, -1):
print(f"{val} seconds", end="\r")
time.sleep(1)
def monitor_pipeline(
project,
pipeline,
target_jobs_regex: re.Pattern,
dependencies,
force_manual: bool,
stress: int,
) -> tuple[Optional[int], Optional[int]]:
"""Monitors pipeline and delegate canceling jobs"""
statuses: dict[str, str] = defaultdict(str)
target_statuses: dict[str, str] = defaultdict(str)
stress_status_counter = defaultdict(lambda: defaultdict(int))
target_id = None
while True:
deps_failed = []
to_cancel = []
for job in pipeline.jobs.list(all=True, sort="desc"):
# target jobs
if target_jobs_regex.fullmatch(job.name):
target_id = job.id
if stress and job.status in ["success", "failed"]:
if (
stress < 0
or sum(stress_status_counter[job.name].values()) < stress
):
enable_job(project, job, "retry", force_manual)
stress_status_counter[job.name][job.status] += 1
else:
enable_job(project, job, "target", force_manual)
print_job_status(job, job.status not in target_statuses[job.name])
target_statuses[job.name] = job.status
continue
# all jobs
if job.status != statuses[job.name]:
print_job_status(job, True)
statuses[job.name] = job.status
# run dependencies and cancel the rest
if job.name in dependencies:
enable_job(project, job, "dep", True)
if job.status == "failed":
deps_failed.append(job.name)
else:
to_cancel.append(job)
cancel_jobs(project, to_cancel)
if stress:
enough = True
for job_name, status in stress_status_counter.items():
print(
f"{job_name}\tsucc: {status['success']}; "
f"fail: {status['failed']}; "
f"total: {sum(status.values())} of {stress}",
flush=False,
)
if stress < 0 or sum(status.values()) < stress:
enough = False
if not enough:
pretty_wait(REFRESH_WAIT_JOBS)
continue
print("---------------------------------", flush=False)
if len(target_statuses) == 1 and {"running"}.intersection(
target_statuses.values()
):
return target_id, None
if (
{"failed"}.intersection(target_statuses.values())
and not set(["running", "pending"]).intersection(target_statuses.values())
):
return None, 1
if (
{"skipped"}.intersection(target_statuses.values())
and not {"running", "pending"}.intersection(target_statuses.values())
):
print(
Fore.RED,
"Target in skipped state, aborting. Failed dependencies:",
deps_failed,
Fore.RESET,
)
return None, 1
if {"success", "manual"}.issuperset(target_statuses.values()):
return None, 0
pretty_wait(REFRESH_WAIT_JOBS)
def enable_job(
project, job, action_type: Literal["target", "dep", "retry"], force_manual: bool
) -> None:
"""enable job"""
if (
(job.status in ["success", "failed"] and action_type != "retry")
or (job.status == "manual" and not force_manual)
or job.status in ["skipped", "running", "created", "pending"]
):
return
pjob = project.jobs.get(job.id, lazy=True)
if job.status in ["success", "failed", "canceled"]:
pjob.retry()
else:
pjob.play()
if action_type == "target":
jtype = "🞋 "
elif action_type == "retry":
jtype = ""
else:
jtype = "(dependency)"
print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL)
def cancel_job(project, job) -> None:
"""Cancel GitLab job"""
if job.status in [
"canceled",
"success",
"failed",
"skipped",
]:
return
pjob = project.jobs.get(job.id, lazy=True)
pjob.cancel()
print(f"{job.name}", end=" ")
def cancel_jobs(project, to_cancel) -> None:
"""Cancel unwanted GitLab jobs"""
if not to_cancel:
return
with ThreadPoolExecutor(max_workers=6) as exe:
part = partial(cancel_job, project)
exe.map(part, to_cancel)
print()
def print_log(project, job_id) -> None:
"""Print job log into output"""
printed_lines = 0
while True:
job = project.jobs.get(job_id)
# GitLab's REST API doesn't offer pagination for logs, so we have to refetch it all
lines = job.trace().decode("raw_unicode_escape").splitlines()
for line in lines[printed_lines:]:
print(line)
printed_lines = len(lines)
if job.status in COMPLETED_STATUSES:
print(Fore.GREEN + f"Job finished: {job.web_url}" + Style.RESET_ALL)
return
pretty_wait(REFRESH_WAIT_LOG)
def parse_args() -> None:
"""Parse args"""
parser = argparse.ArgumentParser(
description="Tool to trigger a subset of container jobs "
+ "and monitor the progress of a test job",
epilog="Example: mesa-monitor.py --rev $(git rev-parse HEAD) "
+ '--target ".*traces" ',
)
parser.add_argument(
"--target",
metavar="target-job",
help="Target job regex. For multiple targets, separate with pipe | character",
required=True,
)
parser.add_argument(
"--token",
metavar="token",
help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
)
parser.add_argument(
"--force-manual", action="store_true", help="Force jobs marked as manual"
)
parser.add_argument(
"--stress",
default=0,
type=int,
help="Stresstest job(s). Number or repetitions or -1 for infinite.",
)
parser.add_argument(
"--project",
default="mesa",
help="GitLab project in the format <user>/<project> or just <project>",
)
mutex_group1 = parser.add_mutually_exclusive_group()
mutex_group1.add_argument(
"--rev", default="HEAD", metavar="revision", help="repository git revision (default: HEAD)"
)
mutex_group1.add_argument(
"--pipeline-url",
help="URL of the pipeline to use, instead of auto-detecting it.",
)
mutex_group1.add_argument(
"--mr",
type=int,
help="ID of a merge request; the latest pipeline in that MR will be used.",
)
args = parser.parse_args()
# argparse doesn't support groups inside add_mutually_exclusive_group(),
# which means we can't just put `--project` and `--rev` in a group together,
# we have to do this by hand instead.
if args.pipeline_url and args.project != parser.get_default("project"):
# weird phrasing but it's the error add_mutually_exclusive_group() gives
parser.error("argument --project: not allowed with argument --pipeline-url")
return args
def print_detected_jobs(
target_dep_dag: "Dag", dependency_jobs: Iterable[str], target_jobs: Iterable[str]
) -> None:
def print_job_set(color: str, kind: str, job_set: Iterable[str]):
print(
color + f"Running {len(job_set)} {kind} jobs: ",
"\n",
", ".join(sorted(job_set)),
Fore.RESET,
"\n",
)
print(Fore.YELLOW + "Detected target job and its dependencies:", "\n")
print_dag(target_dep_dag)
print_job_set(Fore.MAGENTA, "dependency", dependency_jobs)
print_job_set(Fore.BLUE, "target", target_jobs)
def find_dependencies(target_jobs_regex: re.Pattern, project_path: str, iid: int) -> set[str]:
gql_instance = GitlabGQL()
dag = create_job_needs_dag(
gql_instance, {"projectPath": project_path.path_with_namespace, "iid": iid}
)
target_dep_dag = filter_dag(dag, target_jobs_regex)
if not target_dep_dag:
print(Fore.RED + "The job(s) were not found in the pipeline." + Fore.RESET)
sys.exit(1)
dependency_jobs = set(chain.from_iterable(d["needs"] for d in target_dep_dag.values()))
target_jobs = set(target_dep_dag.keys())
print_detected_jobs(target_dep_dag, dependency_jobs, target_jobs)
return target_jobs.union(dependency_jobs)
if __name__ == "__main__":
try:
t_start = time.perf_counter()
args = parse_args()
token = read_token(args.token)
gl = gitlab.Gitlab(url=GITLAB_URL,
private_token=token,
retry_transient_errors=True)
REV: str = args.rev
if args.pipeline_url:
assert args.pipeline_url.startswith(GITLAB_URL)
url_path = args.pipeline_url[len(GITLAB_URL):]
url_path_components = url_path.split("/")
project_name = "/".join(url_path_components[1:3])
assert url_path_components[3] == "-"
assert url_path_components[4] == "pipelines"
pipeline_id = int(url_path_components[5])
cur_project = gl.projects.get(project_name)
pipe = cur_project.pipelines.get(pipeline_id)
REV = pipe.sha
else:
mesa_project = gl.projects.get("mesa/mesa")
projects = [mesa_project]
if args.mr:
REV = mesa_project.mergerequests.get(args.mr).sha
else:
REV = check_output(['git', 'rev-parse', REV]).decode('ascii').strip()
projects.append(get_gitlab_project(gl, args.project))
(pipe, cur_project) = wait_for_pipeline(projects, REV)
print(f"Revision: {REV}")
print(f"Pipeline: {pipe.web_url}")
target_jobs_regex = re.compile(args.target.strip())
deps = set()
if args.target:
print("🞋 job: " + Fore.BLUE + args.target + Style.RESET_ALL)
deps = find_dependencies(
target_jobs_regex=target_jobs_regex, iid=pipe.iid, project_path=cur_project
)
target_job_id, ret = monitor_pipeline(
cur_project, pipe, target_jobs_regex, deps, args.force_manual, args.stress
)
if target_job_id:
print_log(cur_project, target_job_id)
t_end = time.perf_counter()
spend_minutes = (t_end - t_start) / 60
print(f"⏲ Duration of script execution: {spend_minutes:0.1f} minutes")
sys.exit(ret)
except KeyboardInterrupt:
sys.exit(1)
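
For reference, the monitor script above is meant to be run from a Mesa checkout with the flags documented in parse_args(); a minimal sketch of invoking it from Python (the target regex is the one from the argparse epilog and is only illustrative):

# a minimal sketch, assuming a mesa checkout with bin/ci/ci_run_n_monitor.py and its requirements installed
import subprocess

subprocess.run(
    ["python3", "bin/ci/ci_run_n_monitor.py",
     "--rev", "HEAD",          # revision whose pipeline should be monitored
     "--target", ".*traces"],  # regex of target jobs, as in the epilog example
    check=True,
)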

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/ci_run_n_monitor.py" "$@"

@@ -1,334 +0,0 @@
import argparse
import logging
from datetime import datetime
from pathlib import Path
from structured_logger import StructuredLogger
class CustomLogger:
def __init__(self, log_file):
self.log_file = log_file
self.logger = StructuredLogger(file_name=self.log_file)
def get_last_dut_job(self):
"""
Gets the details of the most recent DUT job.
Returns:
dict: Details of the most recent DUT job.
Raises:
ValueError: If no DUT jobs are found in the logger's data.
"""
try:
job = self.logger.data["dut_jobs"][-1]
except KeyError:
raise ValueError(
"No DUT jobs found. Please create a job via create_dut_job call."
)
return job
def update(self, **kwargs):
"""
Updates the log file with provided key-value pairs.
Args:
**kwargs: Key-value pairs to be updated.
"""
with self.logger.edit_context():
for key, value in kwargs.items():
self.logger.data[key] = value
def create_dut_job(self, **kwargs):
"""
Creates a new DUT job with provided key-value pairs.
Args:
**kwargs: Key-value pairs for the new DUT job.
"""
with self.logger.edit_context():
if "dut_jobs" not in self.logger.data:
self.logger.data["dut_jobs"] = []
new_job = {
"status": "",
"submitter_start_time": datetime.now().isoformat(),
"dut_submit_time": "",
"dut_start_time": "",
"dut_end_time": "",
"dut_name": "",
"dut_state": "pending",
"dut_job_phases": [],
**kwargs,
}
self.logger.data["dut_jobs"].append(new_job)
def update_dut_job(self, key, value):
"""
Updates the last DUT job with a key-value pair.
Args:
key : The key to be updated.
value: The value to be assigned.
"""
with self.logger.edit_context():
job = self.get_last_dut_job()
job[key] = value
def update_status_fail(self, reason=""):
"""
Sets the status of the last DUT job to 'fail' and logs the failure reason.
Args:
reason (str, optional): The reason for the failure. Defaults to "".
"""
with self.logger.edit_context():
job = self.get_last_dut_job()
job["status"] = "fail"
job["dut_job_fail_reason"] = reason
def create_job_phase(self, phase_name):
"""
Creates a new job phase for the last DUT job.
Args:
phase_name : The name of the new job phase.
"""
with self.logger.edit_context():
job = self.get_last_dut_job()
if job["dut_job_phases"] and job["dut_job_phases"][-1]["end_time"] == "":
# If the last phase exists and its end time is empty, set the end time
job["dut_job_phases"][-1]["end_time"] = datetime.now().isoformat()
# Create a new phase
phase_data = {
"name": phase_name,
"start_time": datetime.now().isoformat(),
"end_time": "",
}
job["dut_job_phases"].append(phase_data)
def check_dut_timings(self, job):
"""
Check the timing sequence of a job to ensure logical consistency.
The function verifies that the job's submission time is not earlier than its start time and that
the job's end time is not earlier than its start time. If either of these conditions is found to be true,
an error is logged for each instance of inconsistency.
Args:
job (dict): A dictionary containing timing information of a job. Expected keys are 'dut_start_time',
'dut_submit_time', and 'dut_end_time'.
Returns:
None: This function does not return a value; it logs errors if timing inconsistencies are detected.
The function checks the following:
- If 'dut_start_time' and 'dut_submit_time' are both present and correctly sequenced.
- If 'dut_start_time' and 'dut_end_time' are both present and correctly sequenced.
"""
# Check if the start time and submit time exist
if job.get("dut_start_time") and job.get("dut_submit_time"):
# If they exist, check if the submission time is before the start time
if job["dut_start_time"] < job["dut_submit_time"]:
logging.error("Job submission is happening before job start.")
# Check if the start time and end time exist
if job.get("dut_start_time") and job.get("dut_end_time"):
# If they exist, check if the end time is after the start time
if job["dut_end_time"] < job["dut_start_time"]:
logging.error("Job ended before it started.")
# Method to update DUT start, submit and end time
def update_dut_time(self, value, custom_time):
"""
Updates DUT start, submit, and end times.
Args:
value : Specifies which DUT time to update. Options: 'start', 'submit', 'end'.
custom_time : Custom time to set. If None, use current time.
Raises:
ValueError: If an invalid argument is provided for value.
"""
with self.logger.edit_context():
job = self.get_last_dut_job()
timestamp = custom_time if custom_time else datetime.now().isoformat()
if value == "start":
job["dut_start_time"] = timestamp
job["dut_state"] = "running"
elif value == "submit":
job["dut_submit_time"] = timestamp
job["dut_state"] = "submitted"
elif value == "end":
job["dut_end_time"] = timestamp
job["dut_state"] = "finished"
else:
raise ValueError(
"Error: Invalid argument provided for --update-dut-time. Use 'start', 'submit', 'end'."
)
# check the sanity of the partial structured log
self.check_dut_timings(job)
def close_dut_job(self):
"""
Closes the most recent DUT (Device Under Test) job in the logger's data.
The method performs the following operations:
1. Validates if there are any DUT jobs in the logger's data.
2. If the last phase of the most recent DUT job has an empty end time, it sets the end time to the current time.
Raises:
ValueError: If no DUT jobs are found in the logger's data.
"""
with self.logger.edit_context():
job = self.get_last_dut_job()
# Check if the last phase exists and its end time is empty, then set the end time
if job["dut_job_phases"] and job["dut_job_phases"][-1]["end_time"] == "":
job["dut_job_phases"][-1]["end_time"] = datetime.now().isoformat()
def close(self):
"""
Closes the most recent DUT (Device Under Test) job in the logger's data.
The method performs the following operations:
1. Determines the combined status of all DUT jobs.
2. Sets the submitter's end time to the current time.
3. Updates the DUT attempt counter to reflect the total number of DUT jobs.
"""
with self.logger.edit_context():
job_status = []
for job in self.logger.data["dut_jobs"]:
if "status" in job:
job_status.append(job["status"])
if not job_status:
job_combined_status = "null"
else:
# Get job_combined_status
if "pass" in job_status:
job_combined_status = "pass"
else:
job_combined_status = "fail"
self.logger.data["job_combined_status"] = job_combined_status
self.logger.data["dut_attempt_counter"] = len(self.logger.data["dut_jobs"])
job["submitter_end_time"] = datetime.now().isoformat()
def process_args(args):
# Function to process key-value pairs and call corresponding logger methods
def process_key_value_pairs(args_list, action_func):
if not args_list:
raise ValueError(
f"No key-value pairs provided for {action_func.__name__.replace('_', '-')}"
)
if len(args_list) % 2 != 0:
raise ValueError(
f"Incomplete key-value pairs for {action_func.__name__.replace('_', '-')}"
)
kwargs = dict(zip(args_list[::2], args_list[1::2]))
action_func(**kwargs)
# Create a CustomLogger object with the specified log file path
custom_logger = CustomLogger(Path(args.log_file))
if args.update:
process_key_value_pairs(args.update, custom_logger.update)
if args.create_dut_job:
process_key_value_pairs(args.create_dut_job, custom_logger.create_dut_job)
if args.update_dut_job:
key, value = args.update_dut_job
custom_logger.update_dut_job(key, value)
if args.create_job_phase:
custom_logger.create_job_phase(args.create_job_phase)
if args.update_status_fail:
custom_logger.update_status_fail(args.update_status_fail)
if args.update_dut_time:
if len(args.update_dut_time) == 2:
action, custom_time = args.update_dut_time
elif len(args.update_dut_time) == 1:
action, custom_time = args.update_dut_time[0], None
else:
raise ValueError("Invalid number of values for --update-dut-time")
if action in ["start", "end", "submit"]:
custom_logger.update_dut_time(action, custom_time)
else:
raise ValueError(
"Error: Invalid argument provided for --update-dut-time. Use 'start', 'submit', 'end'."
)
if args.close_dut_job:
custom_logger.close_dut_job()
if args.close:
custom_logger.close()
def main():
parser = argparse.ArgumentParser(description="Custom Logger Command Line Tool")
parser.add_argument("log_file", help="Path to the log file")
parser.add_argument(
"--update",
nargs=argparse.ZERO_OR_MORE,
metavar=("key", "value"),
help="Update a key-value pair e.g., --update key1 value1 key2 value2)",
)
parser.add_argument(
"--create-dut-job",
nargs=argparse.ZERO_OR_MORE,
metavar=("key", "value"),
help="Create a new DUT job with key-value pairs (e.g., --create-dut-job key1 value1 key2 value2)",
)
parser.add_argument(
"--update-dut-job",
nargs=argparse.ZERO_OR_MORE,
metavar=("key", "value"),
help="Update a key-value pair in DUT job",
)
parser.add_argument(
"--create-job-phase",
help="Create a new job phase (e.g., --create-job-phase name)",
)
parser.add_argument(
"--update-status-fail",
help="Update fail as the status and log the failure reason (e.g., --update-status-fail reason)",
)
parser.add_argument(
"--update-dut-time",
nargs=argparse.ZERO_OR_MORE,
metavar=("action", "custom_time"),
help="Update DUT start and end time. Provide action ('start', 'submit', 'end') and custom_time (e.g., '2023-01-01T12:00:00')",
)
parser.add_argument(
"--close-dut-job",
action="store_true",
help="Close the dut job by updating end time of last dut job)",
)
parser.add_argument(
"--close",
action="store_true",
help="Updates combined status, submitter's end time and DUT attempt counter",
)
args = parser.parse_args()
process_args(args)
if __name__ == "__main__":
main()
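
Besides the CLI, the CustomLogger class above can be driven directly from Python; a minimal sketch (the log path and field values are illustrative):

# a minimal sketch, assuming custom_logger.py and structured_logger.py are importable
from custom_logger import CustomLogger

logger = CustomLogger("job_detail.json")
logger.create_dut_job(dut_name="asus")   # appends a new entry to "dut_jobs"
logger.update_dut_time("submit", None)   # stamps dut_submit_time, dut_state "submitted"
logger.create_job_phase("Bootloader")    # opens a phase with a start_time
logger.update_dut_time("start", None)
logger.update_dut_time("end", None)
logger.close_dut_job()                   # closes the last open phase
logger.close()                           # combined status, submitter_end_time, attempt counter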

@@ -1,11 +0,0 @@
#!/bin/sh
# Helper script to download the schema GraphQL from Gitlab to enable IDEs to
# assist the developer to edit gql files
SOURCE_DIR=$(dirname "$(realpath "$0")")
(
cd $SOURCE_DIR || exit 1
gql-cli https://gitlab.freedesktop.org/api/graphql --print-schema > schema.graphql
)

@@ -1,63 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2020 - 2022 Collabora Ltd.
# Authors:
# Tomeu Vizoso <tomeu.vizoso@collabora.com>
# David Heidelberg <david.heidelberg@collabora.com>
#
# SPDX-License-Identifier: MIT
'''Shared functions between the scripts.'''
import os
import time
from typing import Optional
def pretty_duration(seconds):
"""Pretty print duration"""
hours, rem = divmod(seconds, 3600)
minutes, seconds = divmod(rem, 60)
if hours:
return f"{hours:0.0f}h{minutes:0.0f}m{seconds:0.0f}s"
if minutes:
return f"{minutes:0.0f}m{seconds:0.0f}s"
return f"{seconds:0.0f}s"
def get_gitlab_project(glab, name: str):
"""Finds a specified gitlab project for given user"""
if "/" in name:
project_path = name
else:
glab.auth()
username = glab.user.username
project_path = f"{username}/{name}"
return glab.projects.get(project_path)
def read_token(token_arg: Optional[str]) -> str:
"""pick token from args or file"""
if token_arg:
return token_arg
return (
open(os.path.expanduser("~/.config/gitlab-token"), encoding="utf-8")
.readline()
.rstrip()
)
def wait_for_pipeline(projects, sha: str, timeout=None):
"""await until pipeline appears in Gitlab"""
project_names = [project.path_with_namespace for project in projects]
print(f"⏲ for the pipeline to appear in {project_names}..", end="")
start_time = time.time()
while True:
for project in projects:
pipelines = project.pipelines.list(sha=sha)
if pipelines:
print("", flush=True)
return (pipelines[0], project)
print("", end=".", flush=True)
if timeout and time.time() - start_time > timeout:
print(" not found", flush=True)
return (None, None)
time.sleep(1)
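
A quick illustration of the helpers above; the expected strings follow directly from the divmod logic in pretty_duration:

# values computed from pretty_duration() as defined above
from gitlab_common import pretty_duration

assert pretty_duration(42) == "42s"
assert pretty_duration(192) == "3m12s"
assert pretty_duration(3725) == "1h2m5s"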

@@ -1,548 +0,0 @@
#!/usr/bin/env python3
# For the dependencies, see the requirements.txt
import logging
import re
import traceback
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import accumulate
from os import getenv
from pathlib import Path
from subprocess import check_output
from textwrap import dedent
from typing import Any, Iterable, Optional, Pattern, TypedDict, Union
import yaml
from filecache import DAY, filecache
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
from graphql import DocumentNode
class DagNode(TypedDict):
needs: set[str]
stage: str
# `name` is redundant but is here for retro-compatibility
name: str
# see create_job_needs_dag function for more details
Dag = dict[str, DagNode]
StageSeq = OrderedDict[str, set[str]]
TOKEN_DIR = Path(getenv("XDG_CONFIG_HOME") or Path.home() / ".config")
def get_token_from_default_dir() -> str:
token_file = TOKEN_DIR / "gitlab-token"
try:
return str(token_file.resolve())
except FileNotFoundError as ex:
print(
f"Could not find {token_file}, please provide a token file as an argument"
)
raise ex
def get_project_root_dir():
root_path = Path(__file__).parent.parent.parent.resolve()
gitlab_file = root_path / ".gitlab-ci.yml"
assert gitlab_file.exists()
return root_path
@dataclass
class GitlabGQL:
_transport: Any = field(init=False)
client: Client = field(init=False)
url: str = "https://gitlab.freedesktop.org/api/graphql"
token: Optional[str] = None
def __post_init__(self) -> None:
self._setup_gitlab_gql_client()
def _setup_gitlab_gql_client(self) -> None:
# Select your transport with a defined url endpoint
headers = {}
if self.token:
headers["Authorization"] = f"Bearer {self.token}"
self._transport = RequestsHTTPTransport(url=self.url, headers=headers)
# Create a GraphQL client using the defined transport
self.client = Client(transport=self._transport, fetch_schema_from_transport=True)
def query(
self,
gql_file: Union[Path, str],
params: dict[str, Any] = {},
operation_name: Optional[str] = None,
paginated_key_loc: Iterable[str] = [],
disable_cache: bool = False,
) -> dict[str, Any]:
def run_uncached() -> dict[str, Any]:
if paginated_key_loc:
return self._sweep_pages(gql_file, params, operation_name, paginated_key_loc)
return self._query(gql_file, params, operation_name)
if disable_cache:
return run_uncached()
try:
# Create an auxiliary variable to deliver a cached result and enable catching exceptions
# Decorate the query to be cached
if paginated_key_loc:
result = self._sweep_pages_cached(
gql_file, params, operation_name, paginated_key_loc
)
else:
result = self._query_cached(gql_file, params, operation_name)
return result # type: ignore
except Exception as ex:
logging.error(f"Cached query failed with {ex}")
# print exception traceback
traceback_str = "".join(traceback.format_exception(ex))
logging.error(traceback_str)
self.invalidate_query_cache()
logging.error("Cache invalidated, retrying without cache")
finally:
return run_uncached()
def _query(
self,
gql_file: Union[Path, str],
params: dict[str, Any] = {},
operation_name: Optional[str] = None,
) -> dict[str, Any]:
# Provide a GraphQL query
source_path: Path = Path(__file__).parent
pipeline_query_file: Path = source_path / gql_file
query: DocumentNode
with open(pipeline_query_file, "r") as f:
pipeline_query = f.read()
query = gql(pipeline_query)
# Execute the query on the transport
return self.client.execute_sync(
query, variable_values=params, operation_name=operation_name
)
@filecache(DAY)
def _sweep_pages_cached(self, *args, **kwargs):
return self._sweep_pages(*args, **kwargs)
@filecache(DAY)
def _query_cached(self, *args, **kwargs):
return self._query(*args, **kwargs)
def _sweep_pages(
self, query, params, operation_name=None, paginated_key_loc: Iterable[str] = []
) -> dict[str, Any]:
"""
Retrieve paginated data from a GraphQL API and concatenate the results into a single
response.
Args:
query: represents a filepath with the GraphQL query to be executed.
params: a dictionary that contains the parameters to be passed to the query. These
parameters can be used to filter or modify the results of the query.
operation_name: The `operation_name` parameter is an optional parameter that specifies
the name of the GraphQL operation to be executed. It is used when making a GraphQL
query to specify which operation to execute if there are multiple operations defined
in the GraphQL schema. If not provided, the default operation will be executed.
paginated_key_loc (Iterable[str]): The `paginated_key_loc` parameter is an iterable of
strings that represents the location of the paginated field within the response. It
is used to extract the paginated field from the response and append it to the final
result. The node has to be a list of objects with a `pageInfo` field that contains
at least the `hasNextPage` and `endCursor` fields.
Returns:
a dictionary containing the response from the query with the paginated field
concatenated.
"""
def fetch_page(cursor: str | None = None) -> dict[str, Any]:
if cursor:
params["cursor"] = cursor
logging.info(
f"Found more than 100 elements, paginating. "
f"Current cursor at {cursor}"
)
return self._query(query, params, operation_name)
# Execute the initial query
response: dict[str, Any] = fetch_page()
# Initialize an empty list to store the final result
final_partial_field: list[dict[str, Any]] = []
# Loop until all pages have been retrieved
while True:
# Get the partial field to be appended to the final result
partial_field = response
for key in paginated_key_loc:
partial_field = partial_field[key]
# Append the partial field to the final result
final_partial_field += partial_field["nodes"]
# Check if there are more pages to retrieve
page_info = partial_field["pageInfo"]
if not page_info["hasNextPage"]:
break
# Execute the query with the updated cursor parameter
response = fetch_page(page_info["endCursor"])
# Replace the "nodes" field in the original response with the final result
partial_field["nodes"] = final_partial_field
return response
def invalidate_query_cache(self) -> None:
logging.warning("Invalidating query cache")
try:
self._sweep_pages._db.clear()
self._query._db.clear()
except AttributeError as ex:
logging.warning(f"Could not invalidate cache, maybe it was not used in {ex.args}?")
def insert_early_stage_jobs(stage_sequence: StageSeq, jobs_metadata: Dag) -> Dag:
pre_processed_dag: dict[str, set[str]] = {}
jobs_from_early_stages = list(accumulate(stage_sequence.values(), set.union))
for job_name, metadata in jobs_metadata.items():
final_needs: set[str] = deepcopy(metadata["needs"])
# Pre-process jobs that are not based on needs field
# e.g. sanity job in mesa MR pipelines
if not final_needs:
job_stage: str = jobs_metadata[job_name]["stage"]
stage_index: int = list(stage_sequence.keys()).index(job_stage)
if stage_index > 0:
final_needs |= jobs_from_early_stages[stage_index - 1]
pre_processed_dag[job_name] = final_needs
for job_name, needs in pre_processed_dag.items():
jobs_metadata[job_name]["needs"] = needs
return jobs_metadata
def traverse_dag_needs(jobs_metadata: Dag) -> None:
created_jobs = set(jobs_metadata.keys())
for job, metadata in jobs_metadata.items():
final_needs: set = deepcopy(metadata["needs"]) & created_jobs
# Post process jobs that are based on needs field
partial = True
while partial:
next_depth: set[str] = {n for dn in final_needs for n in jobs_metadata[dn]["needs"]}
partial: bool = not final_needs.issuperset(next_depth)
final_needs = final_needs.union(next_depth)
jobs_metadata[job]["needs"] = final_needs
def extract_stages_and_job_needs(
pipeline_jobs: dict[str, Any], pipeline_stages: dict[str, Any]
) -> tuple[StageSeq, Dag]:
jobs_metadata = Dag()
# Record the stage sequence to post process deps that are not based on needs
# field, for example: sanity job
stage_sequence: OrderedDict[str, set[str]] = OrderedDict()
for stage in pipeline_stages["nodes"]:
stage_sequence[stage["name"]] = set()
for job in pipeline_jobs["nodes"]:
stage_sequence[job["stage"]["name"]].add(job["name"])
dag_job: DagNode = {
"name": job["name"],
"stage": job["stage"]["name"],
"needs": set([j["node"]["name"] for j in job["needs"]["edges"]]),
}
jobs_metadata[job["name"]] = dag_job
return stage_sequence, jobs_metadata
def create_job_needs_dag(gl_gql: GitlabGQL, params, disable_cache: bool = True) -> Dag:
"""
This function creates a Directed Acyclic Graph (DAG) to represent a sequence of jobs, where each
job has a set of jobs that it depends on (its "needs") and belongs to a certain "stage".
The "name" of the job is used as the key in the dictionary.
For example, consider the following DAG:
1. build stage: job1 -> job2 -> job3
2. test stage: job2 -> job4
- The job needs for job3 are: job1, job2
- The job needs for job4 are: job2
- job2 needs to wait for all jobs from the build stage to finish.
The resulting DAG would look like this:
dag = {
"job1": {"needs": set(), "stage": "build", "name": "job1"},
"job2": {"needs": {"job1", "job2", job3"}, "stage": "test", "name": "job2"},
"job3": {"needs": {"job1", "job2"}, "stage": "build", "name": "job3"},
"job4": {"needs": {"job2"}, "stage": "test", "name": "job4"},
}
To access the job needs, one can do:
dag["job3"]["needs"]
This will return the set of jobs that job3 needs: {"job1", "job2"}
Args:
gl_gql (GitlabGQL): The `gl_gql` parameter is an instance of the `GitlabGQL` class, which is
used to make GraphQL queries to the GitLab API.
params (dict): The `params` parameter is a dictionary that contains the necessary parameters
for the GraphQL query. It is used to specify the details of the pipeline for which the
job needs DAG is being created.
The specific keys and values in the `params` dictionary will depend on
the requirements of the GraphQL query being executed
disable_cache (bool): The `disable_cache` parameter is a boolean that specifies whether the query cache should be bypassed.
Returns:
The final DAG (Directed Acyclic Graph) representing the job dependencies sourced from needs
or stages rule.
"""
stages_jobs_gql = gl_gql.query(
"pipeline_details.gql",
params=params,
paginated_key_loc=["project", "pipeline", "jobs"],
disable_cache=disable_cache,
)
pipeline_data = stages_jobs_gql["project"]["pipeline"]
if not pipeline_data:
raise RuntimeError(f"Could not find any pipelines for {params}")
stage_sequence, jobs_metadata = extract_stages_and_job_needs(
pipeline_data["jobs"], pipeline_data["stages"]
)
# Fill the DAG with the job needs from stages that don't have any needs but still need to wait
# for previous stages
final_dag = insert_early_stage_jobs(stage_sequence, jobs_metadata)
# Now that each job has its direct needs filled correctly, update the "needs" field for each job
# in the DAG by performing a topological traversal
traverse_dag_needs(final_dag)
return final_dag
def filter_dag(dag: Dag, regex: Pattern) -> Dag:
jobs_with_regex: set[str] = {job for job in dag if regex.fullmatch(job)}
return Dag({job: data for job, data in dag.items() if job in sorted(jobs_with_regex)})
def print_dag(dag: Dag) -> None:
for job, data in dag.items():
print(f"{job}:")
print(f"\t{' '.join(data['needs'])}")
print()
def fetch_merged_yaml(gl_gql: GitlabGQL, params) -> dict[str, Any]:
params["content"] = dedent("""\
include:
- local: .gitlab-ci.yml
""")
raw_response = gl_gql.query("job_details.gql", params)
if merged_yaml := raw_response["ciConfig"]["mergedYaml"]:
return yaml.safe_load(merged_yaml)
gl_gql.invalidate_query_cache()
raise ValueError(
"""
Could not fetch any content for merged YAML,
please verify if the git SHA exists in remote.
Maybe you forgot to `git push`? """
)
def recursive_fill(job, relationship_field, target_data, acc_data: dict, merged_yaml):
if relatives := job.get(relationship_field):
if isinstance(relatives, str):
relatives = [relatives]
for relative in relatives:
parent_job = merged_yaml[relative]
acc_data = recursive_fill(parent_job, relationship_field, target_data, acc_data, merged_yaml)
acc_data |= job.get(target_data, {})
return acc_data
def get_variables(job, merged_yaml, project_path, sha) -> dict[str, str]:
p = get_project_root_dir() / ".gitlab-ci" / "image-tags.yml"
image_tags = yaml.safe_load(p.read_text())
variables = image_tags["variables"]
variables |= merged_yaml["variables"]
variables |= job["variables"]
variables["CI_PROJECT_PATH"] = project_path
variables["CI_PROJECT_NAME"] = project_path.split("/")[1]
variables["CI_REGISTRY_IMAGE"] = "registry.freedesktop.org/${CI_PROJECT_PATH}"
variables["CI_COMMIT_SHA"] = sha
while recurse_among_variables_space(variables):
pass
return variables
# Based on: https://stackoverflow.com/a/2158532/1079223
def flatten(xs):
for x in xs:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
def get_full_script(job) -> list[str]:
script = []
for script_part in ("before_script", "script", "after_script"):
script.append(f"# {script_part}")
lines = flatten(job.get(script_part, []))
script.extend(lines)
script.append("")
return script
def recurse_among_variables_space(var_graph) -> bool:
updated = False
for var, value in var_graph.items():
value = str(value)
dep_vars = []
if match := re.findall(r"(\$[{]?[\w\d_]*[}]?)", value):
all_dep_vars = [v.lstrip("${").rstrip("}") for v in match]
# print(value, match, all_dep_vars)
dep_vars = [v for v in all_dep_vars if v in var_graph]
for dep_var in dep_vars:
dep_value = str(var_graph[dep_var])
new_value = var_graph[var]
new_value = new_value.replace(f"${{{dep_var}}}", dep_value)
new_value = new_value.replace(f"${dep_var}", dep_value)
var_graph[var] = new_value
updated |= dep_value != new_value
return updated
def print_job_final_definition(job_name, merged_yaml, project_path, sha):
job = merged_yaml[job_name]
variables = get_variables(job, merged_yaml, project_path, sha)
print("# --------- variables ---------------")
for var, value in sorted(variables.items()):
print(f"export {var}={value!r}")
# TODO: Recurse into needs to get full script
# TODO: maybe create a extra yaml file to avoid too much rework
script = get_full_script(job)
print()
print()
print("# --------- full script ---------------")
print("\n".join(script))
if image := variables.get("MESA_IMAGE"):
print()
print()
print("# --------- container image ---------------")
print(image)
def from_sha_to_pipeline_iid(gl_gql: GitlabGQL, params) -> str:
result = gl_gql.query("pipeline_utils.gql", params)
return result["project"]["pipelines"]["nodes"][0]["iid"]
def parse_args() -> Namespace:
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description="CLI and library with utility functions to debug jobs via Gitlab GraphQL",
epilog=f"""Example:
{Path(__file__).name} --print-dag""",
)
parser.add_argument("-pp", "--project-path", type=str, default="mesa/mesa")
parser.add_argument("--sha", "--rev", type=str, default='HEAD')
parser.add_argument(
"--regex",
type=str,
required=False,
help="Regex pattern for the job name to be considered",
)
mutex_group_print = parser.add_mutually_exclusive_group()
mutex_group_print.add_argument(
"--print-dag",
action="store_true",
help="Print job needs DAG",
)
mutex_group_print.add_argument(
"--print-merged-yaml",
action="store_true",
help="Print the resulting YAML for the specific SHA",
)
mutex_group_print.add_argument(
"--print-job-manifest",
metavar='JOB_NAME',
type=str,
help="Print the resulting job data"
)
parser.add_argument(
"--gitlab-token-file",
type=str,
default=get_token_from_default_dir(),
help="force GitLab token, otherwise it's read from $XDG_CONFIG_HOME/gitlab-token",
)
args = parser.parse_args()
args.gitlab_token = Path(args.gitlab_token_file).read_text().strip()
return args
def main():
args = parse_args()
gl_gql = GitlabGQL(token=args.gitlab_token)
sha = check_output(['git', 'rev-parse', args.sha]).decode('ascii').strip()
if args.print_dag:
iid = from_sha_to_pipeline_iid(gl_gql, {"projectPath": args.project_path, "sha": sha})
dag = create_job_needs_dag(
gl_gql, {"projectPath": args.project_path, "iid": iid}, disable_cache=True
)
if args.regex:
dag = filter_dag(dag, re.compile(args.regex))
print_dag(dag)
if args.print_merged_yaml or args.print_job_manifest:
merged_yaml = fetch_merged_yaml(
gl_gql, {"projectPath": args.project_path, "sha": sha}
)
if args.print_merged_yaml:
print(yaml.dump(merged_yaml, indent=2))
if args.print_job_manifest:
print_job_final_definition(
args.print_job_manifest, merged_yaml, args.project_path, sha
)
if __name__ == "__main__":
main()
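
Besides the CLI in parse_args(), the module above can be used as a library; a minimal sketch of building and filtering the needs DAG (project path, SHA, and regex are illustrative; a token is only needed for data the anonymous API cannot see):

# a minimal sketch, assuming gitlab_gql.py is importable and the SHA has a pipeline
import re
from gitlab_gql import (
    GitlabGQL, create_job_needs_dag, filter_dag, print_dag, from_sha_to_pipeline_iid,
)

gql = GitlabGQL()  # token is optional in the dataclass above
sha = "0123456789abcdef0123456789abcdef01234567"  # placeholder commit SHA
iid = from_sha_to_pipeline_iid(gql, {"projectPath": "mesa/mesa", "sha": sha})
dag = create_job_needs_dag(gql, {"projectPath": "mesa/mesa", "iid": iid})
print_dag(filter_dag(dag, re.compile(r".*traces")))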

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/gitlab_gql.py" "$@"

@@ -1,7 +0,0 @@
query getCiConfigData($projectPath: ID!, $sha: String, $content: String!) {
ciConfig(projectPath: $projectPath, sha: $sha, content: $content) {
errors
mergedYaml
__typename
}
}

@@ -1,67 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2020 - 2023 Collabora Ltd.
# Authors:
# David Heidelberg <david.heidelberg@collabora.com>
#
# SPDX-License-Identifier: MIT
"""
Monitors Marge-bot and returns the number of assigned MRs.
"""
import argparse
import time
import sys
from datetime import datetime, timezone
from dateutil import parser
import gitlab
from gitlab_common import read_token, pretty_duration
REFRESH_WAIT = 30
MARGE_BOT_USER_ID = 9716
def parse_args() -> None:
"""Parse args"""
parse = argparse.ArgumentParser(
description="Tool to show merge requests assigned to the marge-bot",
)
parse.add_argument(
"--wait", action="store_true", help="wait until CI is free",
)
parse.add_argument(
"--token",
metavar="token",
help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
)
return parse.parse_args()
if __name__ == "__main__":
args = parse_args()
token = read_token(args.token)
gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)
project = gl.projects.get("mesa/mesa")
while True:
mrs = project.mergerequests.list(assignee_id=MARGE_BOT_USER_ID, scope="all", state="opened", get_all=True)
jobs_num = len(mrs)
for mr in mrs:
updated = parser.parse(mr.updated_at)
now = datetime.now(timezone.utc)
diff = (now - updated).total_seconds()
print(
f"\u001b]8;;{mr.web_url}\u001b\\{mr.title}\u001b]8;;\u001b\\ ({pretty_duration(diff)})"
)
print("Job waiting: " + str(jobs_num))
if jobs_num == 0:
sys.exit(0)
if not args.wait:
sys.exit(min(jobs_num, 127))
time.sleep(REFRESH_WAIT)
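
Because the script exits with min(jobs_num, 127), the queue length can be read back from the return code; a minimal sketch (assuming a token in ~/.config/gitlab-token, as read_token expects):

# a minimal sketch, assuming bin/ci/marge_queue.py and its requirements are available
import subprocess

proc = subprocess.run(["python3", "bin/ci/marge_queue.py"], check=False)
print(f"MRs currently assigned to marge-bot: {proc.returncode}")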

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/marge_queue.py" "$@"

@@ -1,35 +0,0 @@
query jobs($projectPath: ID!, $iid: ID!, $cursor: String) {
project(fullPath: $projectPath) {
id
pipeline(iid: $iid) {
id
iid
complete
stages {
nodes {
name
}
}
jobs(after: $cursor) {
pageInfo {
hasNextPage
endCursor
}
count
nodes {
name
needs {
edges {
node {
name
}
}
}
stage {
name
}
}
}
}
}
}

@@ -1,9 +0,0 @@
query sha2pipelineIID($projectPath: ID!, $sha: String!) {
project(fullPath: $projectPath) {
pipelines(last: 1, sha:$sha){
nodes {
iid
}
}
}
}

@@ -1,8 +0,0 @@
colorama==0.4.5
filecache==0.81
gql==3.4.0
python-dateutil==2.8.2
python-gitlab==3.5.0
PyYAML==6.0.1
ruamel.yaml.clib==0.2.8
ruamel.yaml==0.17.21

@@ -1,294 +0,0 @@
"""
A structured logging utility supporting multiple data formats such as CSV, JSON,
and YAML.
The main purpose of this script is to make relevant information available in a condensed and easily deserialized form.
This script defines a protocol for different file handling strategies and provides
implementations for CSV, JSON, and YAML formats. The main class, StructuredLogger,
allows for easy interaction with log data, enabling users to load, save, increment,
set, and append fields in the log. The script also includes context managers for
file locking and editing log data to ensure data integrity and avoid race conditions.
"""
import json
import os
from collections.abc import MutableMapping, MutableSequence
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from typing import Any, Protocol
import fire
from filelock import FileLock
try:
import polars as pl
CSV_LIB_EXCEPTION = None
except ImportError as e:
CSV_LIB_EXCEPTION: ImportError = e
try:
from ruamel.yaml import YAML
YAML_LIB_EXCEPTION = None
except ImportError as e:
YAML_LIB_EXCEPTION: ImportError = e
class ContainerProxy:
"""
A proxy class that wraps a mutable container object (such as a dictionary or
a list) and calls a provided save_callback function whenever the container
or its contents
are changed.
"""
def __init__(self, container, save_callback):
self.container = container
self.save_callback = save_callback
def __getitem__(self, key):
value = self.container[key]
if isinstance(value, (MutableMapping, MutableSequence)):
return ContainerProxy(value, self.save_callback)
return value
def __setitem__(self, key, value):
self.container[key] = value
self.save_callback()
def __delitem__(self, key):
del self.container[key]
self.save_callback()
def __getattr__(self, name):
attr = getattr(self.container, name)
if callable(attr):
def wrapper(*args, **kwargs):
result = attr(*args, **kwargs)
self.save_callback()
return result
return wrapper
return attr
def __iter__(self):
return iter(self.container)
def __len__(self):
return len(self.container)
def __repr__(self):
return repr(self.container)
class AutoSaveDict(dict):
"""
A subclass of the built-in dict class with additional functionality to
automatically save changes to the dictionary. It maintains a timestamp of
the last modification and automatically wraps nested mutable containers
using ContainerProxy.
"""
timestamp_key = "_timestamp"
def __init__(self, *args, save_callback, register_timestamp=True, **kwargs):
self.save_callback = save_callback
self.__register_timestamp = register_timestamp
self.__heartbeat()
super().__init__(*args, **kwargs)
self.__wrap_dictionaries()
def __heartbeat(self):
if self.__register_timestamp:
self[AutoSaveDict.timestamp_key] = datetime.now().isoformat()
def __save(self):
self.__heartbeat()
self.save_callback()
def __wrap_dictionaries(self):
for key, value in self.items():
if isinstance(value, MutableMapping) and not isinstance(
value, AutoSaveDict
):
self[key] = AutoSaveDict(
value, save_callback=self.save_callback, register_timestamp=False
)
def __setitem__(self, key, value):
if isinstance(value, MutableMapping) and not isinstance(value, AutoSaveDict):
value = AutoSaveDict(
value, save_callback=self.save_callback, register_timestamp=False
)
super().__setitem__(key, value)
if self.__register_timestamp and key == AutoSaveDict.timestamp_key:
return
self.__save()
def __getitem__(self, key):
value = super().__getitem__(key)
if isinstance(value, (MutableMapping, MutableSequence)):
return ContainerProxy(value, self.__save)
return value
def __delitem__(self, key):
super().__delitem__(key)
self.__save()
def pop(self, *args, **kwargs):
result = super().pop(*args, **kwargs)
self.__save()
return result
def update(self, *args, **kwargs):
super().update(*args, **kwargs)
self.__wrap_dictionaries()
self.__save()
class StructuredLoggerStrategy(Protocol):
def load_data(self, file_path: Path) -> dict:
pass
def save_data(self, file_path: Path, data: dict) -> None:
pass
class CSVStrategy:
def __init__(self) -> None:
if CSV_LIB_EXCEPTION:
raise RuntimeError(
"Can't parse CSV files. Missing library"
) from CSV_LIB_EXCEPTION
def load_data(self, file_path: Path) -> dict:
dicts: list[dict[str, Any]] = pl.read_csv(
file_path, try_parse_dates=True
).to_dicts()
data = {}
for d in dicts:
for k, v in d.items():
if k != AutoSaveDict.timestamp_key and k in data:
if isinstance(data[k], list):
data[k].append(v)
continue
data[k] = [data[k], v]
else:
data[k] = v
return data
def save_data(self, file_path: Path, data: dict) -> None:
pl.DataFrame(data).write_csv(file_path)
class JSONStrategy:
def load_data(self, file_path: Path) -> dict:
return json.loads(file_path.read_text())
def save_data(self, file_path: Path, data: dict) -> None:
with open(file_path, "w") as f:
json.dump(data, f, indent=2)
class YAMLStrategy:
def __init__(self):
if YAML_LIB_EXCEPTION:
raise RuntimeError(
"Can't parse YAML files. Missing library"
) from YAML_LIB_EXCEPTION
self.yaml = YAML()
self.yaml.indent(sequence=4, offset=2)
self.yaml.default_flow_style = False
self.yaml.representer.add_representer(AutoSaveDict, self.represent_dict)
@classmethod
def represent_dict(cls, dumper, data):
return dumper.represent_mapping("tag:yaml.org,2002:map", data)
def load_data(self, file_path: Path) -> dict:
return self.yaml.load(file_path.read_text())
def save_data(self, file_path: Path, data: dict) -> None:
with open(file_path, "w") as f:
self.yaml.dump(data, f)
class StructuredLogger:
def __init__(
self, file_name: str, strategy: StructuredLoggerStrategy = None, truncate=False
):
self.file_name: str = file_name
self.file_path = Path(self.file_name)
self._data: AutoSaveDict = AutoSaveDict(save_callback=self.save_data)
if strategy is None:
self.strategy: StructuredLoggerStrategy = self.guess_strategy_from_file(
self.file_path
)
else:
self.strategy = strategy
if not self.file_path.exists():
Path.mkdir(self.file_path.parent, exist_ok=True)
self.save_data()
return
if truncate:
with self.get_lock():
os.truncate(self.file_path, 0)
self.save_data()
def load_data(self):
self._data = self.strategy.load_data(self.file_path)
def save_data(self):
self.strategy.save_data(self.file_path, self._data)
@property
def data(self) -> AutoSaveDict:
return self._data
@contextmanager
def get_lock(self):
with FileLock(f"{self.file_path}.lock", timeout=10):
yield
@contextmanager
def edit_context(self):
"""
Context manager that ensures proper loading and saving of log data when
performing multiple modifications.
"""
with self.get_lock():
try:
self.load_data()
yield
finally:
self.save_data()
@staticmethod
def guess_strategy_from_file(file_path: Path) -> StructuredLoggerStrategy:
file_extension = file_path.suffix.lower().lstrip(".")
return StructuredLogger.get_strategy(file_extension)
@staticmethod
def get_strategy(strategy_name: str) -> StructuredLoggerStrategy:
strategies = {
"csv": CSVStrategy,
"json": JSONStrategy,
"yaml": YAMLStrategy,
"yml": YAMLStrategy,
}
try:
return strategies[strategy_name]()
except KeyError as e:
raise ValueError(f"Unknown strategy for: {strategy_name}") from e
if __name__ == "__main__":
fire.Fire(StructuredLogger)
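
A minimal sketch of using StructuredLogger directly (file name and keys are illustrative; JSON is used so neither the CSV nor the YAML optional dependency is needed):

# a minimal sketch, assuming structured_logger.py plus filelock and fire are installed
from structured_logger import StructuredLogger

log = StructuredLogger("job_detail.json", truncate=True)
with log.edit_context():  # takes the file lock, loads, and saves again on exit
    log.data["job_combined_status"] = "pass"
    log.data["dut_attempt_counter"] = 1
print(log.data)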

@@ -1,5 +0,0 @@
filelock==3.12.4
fire==0.5.0
mock==5.1.0
polars==0.19.3
pytest==7.4.2

@@ -1,669 +0,0 @@
import logging
import subprocess
from datetime import datetime
import pytest
from custom_logger import CustomLogger
@pytest.fixture
def tmp_log_file(tmp_path):
return tmp_path / "test_log.json"
@pytest.fixture
def custom_logger(tmp_log_file):
return CustomLogger(tmp_log_file)
def run_script_with_args(args):
import custom_logger
script_path = custom_logger.__file__
return subprocess.run(
["python3", str(script_path), *args], capture_output=True, text=True
)
# Test case for missing log file
@pytest.mark.parametrize(
"key, value", [("dut_attempt_counter", "1"), ("job_combined_status", "pass")]
)
def test_missing_log_file_argument(key, value):
result = run_script_with_args(["--update", "key", "value"])
assert result.returncode != 0
# Parametrize test case for valid update arguments
@pytest.mark.parametrize(
"key, value", [("dut_attempt_counter", "1"), ("job_combined_status", "pass")]
)
def test_update_argument_valid(custom_logger, tmp_log_file, key, value):
result = run_script_with_args([str(tmp_log_file), "--update", key, value])
assert result.returncode == 0
# Test case for passing only the key without a value
def test_update_argument_key_only(custom_logger, tmp_log_file):
key = "dut_attempt_counter"
result = run_script_with_args([str(tmp_log_file), "--update", key])
assert result.returncode != 0
# Test case for not passing any key-value pair
def test_update_argument_no_values(custom_logger, tmp_log_file):
result = run_script_with_args([str(tmp_log_file), "--update"])
assert result.returncode == 0
# Parametrize test case for valid arguments
@pytest.mark.parametrize(
"key, value", [("dut_attempt_counter", "1"), ("job_combined_status", "pass")]
)
def test_create_argument_valid(custom_logger, tmp_log_file, key, value):
result = run_script_with_args([str(tmp_log_file), "--create-dut-job", key, value])
assert result.returncode == 0
# Test case for passing only the key without a value
def test_create_argument_key_only(custom_logger, tmp_log_file):
key = "dut_attempt_counter"
result = run_script_with_args([str(tmp_log_file), "--create-dut-job", key])
assert result.returncode != 0
# Test case for not passing any key-value pair
def test_create_argument_no_values(custom_logger, tmp_log_file):
result = run_script_with_args([str(tmp_log_file), "--create-dut-job"])
assert result.returncode == 0
# Test case for updating a DUT job
@pytest.mark.parametrize(
"key, value", [("status", "hung"), ("dut_state", "Canceling"), ("dut_name", "asus")]
)
def test_update_dut_job(custom_logger, tmp_log_file, key, value):
result = run_script_with_args([str(tmp_log_file), "--update-dut-job", key, value])
assert result.returncode != 0
result = run_script_with_args([str(tmp_log_file), "--create-dut-job", key, value])
assert result.returncode == 0
result = run_script_with_args([str(tmp_log_file), "--update-dut-job", key, value])
assert result.returncode == 0
# Test case for updating last DUT job
def test_update_dut_multiple_job(custom_logger, tmp_log_file):
# Create the first DUT job with the first key
result = run_script_with_args(
[str(tmp_log_file), "--create-dut-job", "status", "hung"]
)
assert result.returncode == 0
# Create the second DUT job with the second key
result = run_script_with_args(
[str(tmp_log_file), "--create-dut-job", "dut_state", "Canceling"]
)
assert result.returncode == 0
result = run_script_with_args(
[str(tmp_log_file), "--update-dut-job", "dut_name", "asus"]
)
assert result.returncode == 0
# Parametrize test case for valid phase arguments
@pytest.mark.parametrize(
"phase_name",
[("Phase1"), ("Phase2"), ("Phase3")],
)
def test_create_job_phase_valid(custom_logger, tmp_log_file, phase_name):
custom_logger.create_dut_job(status="pass")
result = run_script_with_args([str(tmp_log_file), "--create-job-phase", phase_name])
assert result.returncode == 0
# Test case for not passing any arguments for create-job-phase
def test_create_job_phase_no_arguments(custom_logger, tmp_log_file):
custom_logger.create_dut_job(status="pass")
result = run_script_with_args([str(tmp_log_file), "--create-job-phase"])
assert result.returncode != 0
# Test case for trying to create a phase job without an existing DUT job
def test_create_job_phase_no_dut_job(custom_logger, tmp_log_file):
phase_name = "Phase1"
result = run_script_with_args([str(tmp_log_file), "--create-job-phase", phase_name])
assert result.returncode != 0
# Combined test cases for valid scenarios
def test_valid_scenarios(custom_logger, tmp_log_file):
valid_update_args = [("dut_attempt_counter", "1"), ("job_combined_status", "pass")]
for key, value in valid_update_args:
result = run_script_with_args([str(tmp_log_file), "--update", key, value])
assert result.returncode == 0
valid_create_args = [
("status", "hung"),
("dut_state", "Canceling"),
("dut_name", "asus"),
("phase_name", "Bootloader"),
]
for key, value in valid_create_args:
result = run_script_with_args(
[str(tmp_log_file), "--create-dut-job", key, value]
)
assert result.returncode == 0
result = run_script_with_args(
[str(tmp_log_file), "--create-dut-job", "status", "hung"]
)
assert result.returncode == 0
result = run_script_with_args(
[str(tmp_log_file), "--update-dut-job", "dut_name", "asus"]
)
assert result.returncode == 0
result = run_script_with_args(
[
str(tmp_log_file),
"--create-job-phase",
"phase_name",
]
)
assert result.returncode == 0
# Parametrize test case for valid update arguments
@pytest.mark.parametrize(
"key, value", [("dut_attempt_counter", "1"), ("job_combined_status", "pass")]
)
def test_update(custom_logger, key, value):
custom_logger.update(**{key: value})
logger_data = custom_logger.logger.data
assert key in logger_data
assert logger_data[key] == value
# Test case for updating with a key that already exists
def test_update_existing_key(custom_logger):
key = "status"
value = "new_value"
custom_logger.logger.data[key] = "old_value"
custom_logger.update(**{key: value})
logger_data = custom_logger.logger.data
assert key in logger_data
assert logger_data[key] == value
# Test case for updating "dut_jobs"
def test_update_dut_jobs(custom_logger):
key1 = "status"
value1 = "fail"
key2 = "state"
value2 = "hung"
custom_logger.create_dut_job(**{key1: value1})
logger_data = custom_logger.logger.data
job1 = logger_data["dut_jobs"][0]
assert key1 in job1
assert job1[key1] == value1
custom_logger.update_dut_job(key2, value2)
logger_data = custom_logger.logger.data
job2 = logger_data["dut_jobs"][0]
assert key2 in job2
assert job2[key2] == value2
# Test case for creating and updating DUT job
def test_create_dut_job(custom_logger):
key = "status"
value1 = "pass"
value2 = "fail"
value3 = "hung"
reason = "job_combined_status"
result = "Finished"
custom_logger.update(**{reason: result})
logger_data = custom_logger.logger.data
assert reason in logger_data
assert logger_data[reason] == result
# Create the first DUT job
custom_logger.create_dut_job(**{key: value1})
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert isinstance(logger_data["dut_jobs"], list)
assert len(logger_data["dut_jobs"]) == 1
assert isinstance(logger_data["dut_jobs"][0], dict)
# Check the values of the keys in the created first DUT job
job1 = logger_data["dut_jobs"][0]
assert key in job1
assert job1[key] == value1
# Create the second DUT job
custom_logger.create_dut_job(**{key: value2})
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert isinstance(logger_data["dut_jobs"], list)
assert len(logger_data["dut_jobs"]) == 2
assert isinstance(logger_data["dut_jobs"][1], dict)
# Check the values of the keys in the created second DUT job
job2 = logger_data["dut_jobs"][1]
assert key in job2
assert job2[key] == value2
# Update the second DUT job with value3
custom_logger.update_dut_job(key, value3)
logger_data = custom_logger.logger.data
# Check the updated value in the second DUT job
job2 = logger_data["dut_jobs"][1]
assert key in job2
assert job2[key] == value3
# Find the index of the last DUT job
last_job_index = len(logger_data["dut_jobs"]) - 1
# Update the last DUT job
custom_logger.update_dut_job("dut_name", "asus")
logger_data = custom_logger.logger.data
# Check the updated value in the last DUT job
job2 = logger_data["dut_jobs"][last_job_index]
assert "dut_name" in job2
assert job2["dut_name"] == "asus"
# Check that "dut_name" is not present in other DUT jobs
for idx, job in enumerate(logger_data["dut_jobs"]):
if idx != last_job_index:
assert job.get("dut_name") == ""
# Test case for updating with missing "dut_jobs" key
def test_update_dut_job_missing_dut_jobs(custom_logger):
key = "status"
value = "fail"
# Attempt to update a DUT job when "dut_jobs" is missing
with pytest.raises(ValueError, match="No DUT jobs found."):
custom_logger.update_dut_job(key, value)
# Test case for creating a job phase
def test_create_job_phase(custom_logger):
custom_logger.create_dut_job(status="pass")
phase_name = "Phase1"
custom_logger.create_job_phase(phase_name)
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert isinstance(logger_data["dut_jobs"], list)
assert len(logger_data["dut_jobs"]) == 1
job = logger_data["dut_jobs"][0]
assert "dut_job_phases" in job
assert isinstance(job["dut_job_phases"], list)
assert len(job["dut_job_phases"]) == 1
phase = job["dut_job_phases"][0]
assert phase["name"] == phase_name
try:
datetime.fromisoformat(phase["start_time"])
assert True
except ValueError:
assert False
assert phase["end_time"] == ""
# Test case for creating multiple phase jobs
def test_create_multiple_phase_jobs(custom_logger):
custom_logger.create_dut_job(status="pass")
phase_data = [
{
"phase_name": "Phase1",
},
{
"phase_name": "Phase2",
},
{
"phase_name": "Phase3",
},
]
for data in phase_data:
phase_name = data["phase_name"]
custom_logger.create_job_phase(phase_name)
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert isinstance(logger_data["dut_jobs"], list)
assert len(logger_data["dut_jobs"]) == 1
job = logger_data["dut_jobs"][0]
assert "dut_job_phases" in job
assert isinstance(job["dut_job_phases"], list)
assert len(job["dut_job_phases"]) == len(phase_data)
for data in phase_data:
phase_name = data["phase_name"]
phase = job["dut_job_phases"][phase_data.index(data)]
assert phase["name"] == phase_name
try:
datetime.fromisoformat(phase["start_time"])
assert True
except ValueError:
assert False
if phase_data.index(data) != len(phase_data) - 1:
try:
datetime.fromisoformat(phase["end_time"])
assert True
except ValueError:
assert False
# Check if the end_time of the last phase is an empty string
last_phase = job["dut_job_phases"][-1]
assert last_phase["end_time"] == ""
# Test case for creating multiple dut jobs and updating phase job for last dut job
def test_create_two_dut_jobs_and_add_phase(custom_logger):
# Create the first DUT job
custom_logger.create_dut_job(status="pass")
# Create the second DUT job
custom_logger.create_dut_job(status="fail")
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert isinstance(logger_data["dut_jobs"], list)
assert len(logger_data["dut_jobs"]) == 2
first_dut_job = logger_data["dut_jobs"][0]
second_dut_job = logger_data["dut_jobs"][1]
# Add a phase to the second DUT job
custom_logger.create_job_phase("Phase1")
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert isinstance(logger_data["dut_jobs"], list)
assert len(logger_data["dut_jobs"]) == 2
first_dut_job = logger_data["dut_jobs"][0]
second_dut_job = logger_data["dut_jobs"][1]
# Check first DUT job does not have a phase
assert not first_dut_job.get("dut_job_phases")
# Check second DUT job has a phase
assert second_dut_job.get("dut_job_phases")
assert isinstance(second_dut_job["dut_job_phases"], list)
assert len(second_dut_job["dut_job_phases"]) == 1
# Test case for updating DUT start time
def test_update_dut_start_time(custom_logger):
custom_logger.create_dut_job(status="pass")
custom_logger.update_dut_time("start", None)
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
dut_job = logger_data["dut_jobs"][0]
assert "dut_start_time" in dut_job
assert dut_job["dut_start_time"] != ""
try:
datetime.fromisoformat(dut_job["dut_start_time"])
assert True
except ValueError:
assert False
# Test case for updating DUT submit time
def test_update_dut_submit_time(custom_logger):
custom_time = "2023-11-09T02:37:06Z"
custom_logger.create_dut_job(status="pass")
custom_logger.update_dut_time("submit", custom_time)
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
dut_job = logger_data["dut_jobs"][0]
assert "dut_submit_time" in dut_job
try:
datetime.fromisoformat(dut_job["dut_submit_time"])
assert True
except ValueError:
assert False
# Test case for updating DUT end time
def test_update_dut_end_time(custom_logger):
custom_logger.create_dut_job(status="pass")
custom_logger.update_dut_time("end", None)
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
dut_job = logger_data["dut_jobs"][0]
assert "dut_end_time" in dut_job
try:
datetime.fromisoformat(dut_job["dut_end_time"])
assert True
except ValueError:
assert False
# Test case for updating DUT time with invalid value
def test_update_dut_time_invalid_value(custom_logger):
custom_logger.create_dut_job(status="pass")
with pytest.raises(
ValueError,
match="Error: Invalid argument provided for --update-dut-time. Use 'start', 'submit', 'end'.",
):
custom_logger.update_dut_time("invalid_value", None)
# Test case for close_dut_job
def test_close_dut_job(custom_logger):
custom_logger.create_dut_job(status="pass")
custom_logger.create_job_phase("Phase1")
custom_logger.create_job_phase("Phase2")
custom_logger.close_dut_job()
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
dut_job = logger_data["dut_jobs"][0]
assert "dut_job_phases" in dut_job
dut_job_phases = dut_job["dut_job_phases"]
phase1 = dut_job_phases[0]
assert phase1["name"] == "Phase1"
try:
datetime.fromisoformat(phase1["start_time"])
assert True
except ValueError:
assert False
try:
datetime.fromisoformat(phase1["end_time"])
assert True
except ValueError:
assert False
phase2 = dut_job_phases[1]
assert phase2["name"] == "Phase2"
try:
datetime.fromisoformat(phase2["start_time"])
assert True
except ValueError:
assert False
try:
datetime.fromisoformat(phase2["end_time"])
assert True
except ValueError:
assert False
# Test case for close
def test_close(custom_logger):
custom_logger.create_dut_job(status="pass")
custom_logger.close()
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
assert "dut_attempt_counter" in logger_data
assert logger_data["dut_attempt_counter"] == len(logger_data["dut_jobs"])
assert "job_combined_status" in logger_data
assert logger_data["job_combined_status"] != ""
dut_job = logger_data["dut_jobs"][0]
assert "submitter_end_time" in dut_job
try:
datetime.fromisoformat(dut_job["submitter_end_time"])
assert True
except ValueError:
assert False
# Test case for updating status to fail with a reason
def test_update_status_fail_with_reason(custom_logger):
custom_logger.create_dut_job()
reason = "kernel panic"
custom_logger.update_status_fail(reason)
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
dut_job = logger_data["dut_jobs"][0]
assert "status" in dut_job
assert dut_job["status"] == "fail"
assert "dut_job_fail_reason" in dut_job
assert dut_job["dut_job_fail_reason"] == reason
# Test case for updating status to fail without providing a reason
def test_update_status_fail_without_reason(custom_logger):
custom_logger.create_dut_job()
custom_logger.update_status_fail()
# Check if the status is updated and fail reason is empty
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
dut_job = logger_data["dut_jobs"][0]
assert "status" in dut_job
assert dut_job["status"] == "fail"
assert "dut_job_fail_reason" in dut_job
assert dut_job["dut_job_fail_reason"] == ""
# Test case for check_dut_timings with submission time earlier than start time
def test_check_dut_timings_submission_earlier_than_start(custom_logger, caplog):
custom_logger.create_dut_job()
# Set submission time to be earlier than start time
custom_logger.update_dut_time("start", "2023-01-01T11:00:00")
custom_logger.update_dut_time("submit", "2023-01-01T12:00:00")
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
job = logger_data["dut_jobs"][0]
# Call check_dut_timings
custom_logger.check_dut_timings(job)
# Check if an error message is logged
assert "Job submission is happening before job start." in caplog.text
# Test case for check_dut_timings with end time earlier than start time
def test_check_dut_timings_end_earlier_than_start(custom_logger, caplog):
custom_logger.create_dut_job()
# Set end time to be earlier than start time
custom_logger.update_dut_time("end", "2023-01-01T11:00:00")
custom_logger.update_dut_time("start", "2023-01-01T12:00:00")
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
job = logger_data["dut_jobs"][0]
# Call check_dut_timings
custom_logger.check_dut_timings(job)
# Check if an error message is logged
assert "Job ended before it started." in caplog.text
# Test case for check_dut_timings with valid timing sequence
def test_check_dut_timings_valid_timing_sequence(custom_logger, caplog):
custom_logger.create_dut_job()
# Set valid timing sequence
custom_logger.update_dut_time("submit", "2023-01-01T12:00:00")
custom_logger.update_dut_time("start", "2023-01-01T12:30:00")
custom_logger.update_dut_time("end", "2023-01-01T13:00:00")
logger_data = custom_logger.logger.data
assert "dut_jobs" in logger_data
assert len(logger_data["dut_jobs"]) == 1
job = logger_data["dut_jobs"][0]
# Call check_dut_timings
custom_logger.check_dut_timings(job)
# Check that no error messages are logged
assert "Job submission is happening before job start." not in caplog.text
assert "Job ended before it started." not in caplog.text

View file

@ -1,182 +0,0 @@
import json
from pathlib import Path
import pytest
from mock import MagicMock, patch
from structured_logger import (
AutoSaveDict,
CSVStrategy,
JSONStrategy,
StructuredLogger,
YAMLStrategy,
)
@pytest.fixture(params=[CSVStrategy, JSONStrategy, YAMLStrategy])
def strategy(request):
return request.param
@pytest.fixture
def file_extension(strategy):
if strategy == CSVStrategy:
return "csv"
elif strategy == JSONStrategy:
return "json"
elif strategy == YAMLStrategy:
return "yaml"
@pytest.fixture
def tmp_file(tmp_path):
return tmp_path / "test.json"
def test_guess_strategy_from_file(tmp_path, strategy, file_extension):
file_name = tmp_path / f"test_guess.{file_extension}"
Path(file_name).touch()
guessed_strategy = StructuredLogger.guess_strategy_from_file(file_name)
assert isinstance(guessed_strategy, strategy)
def test_get_strategy(strategy, file_extension):
result = StructuredLogger.get_strategy(file_extension)
assert isinstance(result, strategy)
def test_invalid_file_extension(tmp_path):
file_name = tmp_path / "test_invalid.xyz"
Path(file_name).touch()
with pytest.raises(ValueError, match="Unknown strategy for: xyz"):
StructuredLogger.guess_strategy_from_file(file_name)
def test_non_existent_file(tmp_path, strategy, file_extension):
file_name = tmp_path / f"non_existent.{file_extension}"
logger = StructuredLogger(file_name, strategy())
assert logger.file_path.exists()
assert "_timestamp" in logger._data
@pytest.fixture
def structured_logger_module():
with patch.dict("sys.modules", {"polars": None, "ruamel.yaml": None}):
import importlib
import structured_logger
importlib.reload(structured_logger)
yield structured_logger
def test_missing_csv_library(tmp_path, structured_logger_module):
with pytest.raises(RuntimeError, match="Can't parse CSV files. Missing library"):
structured_logger_module.CSVStrategy()
def test_missing_yaml_library(tmp_path, structured_logger_module):
with pytest.raises(RuntimeError, match="Can't parse YAML files. Missing library"):
structured_logger_module.YAMLStrategy()
def test_autosavedict_setitem():
save_callback = MagicMock()
d = AutoSaveDict(save_callback=save_callback)
d["key"] = "value"
assert d["key"] == "value"
save_callback.assert_called_once()
def test_autosavedict_delitem():
save_callback = MagicMock()
d = AutoSaveDict({"key": "value"}, save_callback=save_callback)
del d["key"]
assert "key" not in d
save_callback.assert_called_once()
def test_autosavedict_pop():
save_callback = MagicMock()
d = AutoSaveDict({"key": "value"}, save_callback=save_callback)
result = d.pop("key")
assert result == "value"
assert "key" not in d
save_callback.assert_called_once()
def test_autosavedict_update():
save_callback = MagicMock()
d = AutoSaveDict({"key": "old_value"}, save_callback=save_callback)
d.update({"key": "new_value"})
assert d["key"] == "new_value"
save_callback.assert_called_once()
def test_structured_logger_setitem(tmp_file):
logger = StructuredLogger(tmp_file, JSONStrategy())
logger.data["field"] = "value"
with open(tmp_file, "r") as f:
data = json.load(f)
assert data["field"] == "value"
def test_structured_logger_set_recursive(tmp_file):
logger = StructuredLogger(tmp_file, JSONStrategy())
logger.data["field"] = {"test": True}
other = logger.data["field"]
other["late"] = True
with open(tmp_file, "r") as f:
data = json.load(f)
assert data["field"]["test"]
assert data["field"]["late"]
def test_structured_logger_set_list(tmp_file):
logger = StructuredLogger(tmp_file, JSONStrategy())
logger.data["field"] = [True]
other = logger.data["field"]
other.append(True)
with open(tmp_file, "r") as f:
data = json.load(f)
assert data["field"][0]
assert data["field"][1]
def test_structured_logger_delitem(tmp_file):
logger = StructuredLogger(tmp_file, JSONStrategy())
logger.data["field"] = "value"
del logger.data["field"]
with open(tmp_file, "r") as f:
data = json.load(f)
assert "field" not in data
def test_structured_logger_pop(tmp_file):
logger = StructuredLogger(tmp_file, JSONStrategy())
logger.data["field"] = "value"
logger.data.pop("field")
with open(tmp_file, "r") as f:
data = json.load(f)
assert "field" not in data
def test_structured_logger_update(tmp_file):
logger = StructuredLogger(tmp_file, JSONStrategy())
logger.data.update({"field": "value"})
with open(tmp_file, "r") as f:
data = json.load(f)
assert data["field"] == "value"

View file

@ -1,143 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2022 Collabora Ltd.
# Authors:
# David Heidelberg <david.heidelberg@collabora.com>
#
# For the dependencies, see the requirements.txt
# SPDX-License-Identifier: MIT
"""
Helper script to update traces checksums
"""
import argparse
import bz2
import glob
import re
import json
import sys
from ruamel.yaml import YAML
import gitlab
from colorama import Fore, Style
from gitlab_common import get_gitlab_project, read_token, wait_for_pipeline
DESCRIPTION_FILE = "export PIGLIT_REPLAY_DESCRIPTION_FILE='.*/install/(.*)'$"
DEVICE_NAME = "export PIGLIT_REPLAY_DEVICE_NAME='(.*)'$"
def gather_results(
project,
pipeline,
) -> None:
"""Gather results"""
target_jobs_regex = re.compile(".*-traces([:].*)?$")
for job in pipeline.jobs.list(all=True, sort="desc"):
if target_jobs_regex.match(job.name) and job.status == "failed":
cur_job = project.jobs.get(job.id)
# get variables
print(f"👁 {job.name}...")
log: list[str] = cur_job.trace().decode("unicode_escape").splitlines()
filename: str = ''
dev_name: str = ''
for logline in log:
desc_file = re.search(DESCRIPTION_FILE, logline)
device_name = re.search(DEVICE_NAME, logline)
if desc_file:
filename = desc_file.group(1)
if device_name:
dev_name = device_name.group(1)
if not filename or not dev_name:
print(Fore.RED + "Couldn't find device name or YML file in the logs!" + Style.RESET_ALL)
return
print(f"👁 Found {dev_name} and file {filename}")
# find filename in Mesa source
traces_file = glob.glob('./**/' + filename, recursive=True)
# write into it
with open(traces_file[0], 'r', encoding='utf-8') as target_file:
yaml = YAML()
yaml.compact(seq_seq=False, seq_map=False)
yaml.version = 1,2
yaml.width = 2048 # do not break the text fields
yaml.default_flow_style = None
target = yaml.load(target_file)
# parse artifact
results_json_bz2 = cur_job.artifact(path="results/results.json.bz2", streamed=False)
results_json = bz2.decompress(results_json_bz2).decode("utf-8", errors="replace")
results = json.loads(results_json)
for _, value in results["tests"].items():
if (
not value['images'] or
not value['images'][0] or
"image_desc" not in value['images'][0]
):
continue
trace: str = value['images'][0]['image_desc']
checksum: str = value['images'][0]['checksum_render']
if not checksum:
print(Fore.RED + f"{dev_name}: {trace}: checksum is missing! Crash?" + Style.RESET_ALL)
continue
if checksum == "error":
print(Fore.RED + f"{dev_name}: {trace}: crashed" + Style.RESET_ALL)
continue
if target['traces'][trace][dev_name].get('checksum') == checksum:
continue
if "label" in target['traces'][trace][dev_name]:
print(f'{dev_name}: {trace}: please verify that label {Fore.BLUE}{target["traces"][trace][dev_name]["label"]}{Style.RESET_ALL} is still valid')
print(Fore.GREEN + f'{dev_name}: {trace}: checksum updated' + Style.RESET_ALL)
target['traces'][trace][dev_name]['checksum'] = checksum
with open(traces_file[0], 'w', encoding='utf-8') as target_file:
yaml.dump(target, target_file)
def parse_args() -> None:
"""Parse args"""
parser = argparse.ArgumentParser(
description="Tool to generate patch from checksums ",
epilog="Example: update_traces_checksum.py --rev $(git rev-parse HEAD) "
)
parser.add_argument(
"--rev", metavar="revision", help="repository git revision", required=True
)
parser.add_argument(
"--token",
metavar="token",
help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
)
return parser.parse_args()
if __name__ == "__main__":
try:
args = parse_args()
token = read_token(args.token)
gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)
cur_project = get_gitlab_project(gl, "mesa")
print(f"Revision: {args.rev}")
(pipe, cur_project) = wait_for_pipeline([cur_project], args.rev)
print(f"Pipeline: {pipe.web_url}")
gather_results(cur_project, pipe)
sys.exit()
except KeyboardInterrupt:
sys.exit(1)
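
For reference, the two module-level regexes pull their values out of shell export lines in the job trace; a quick check with made-up log lines (the trace file and device names are purely illustrative):

import re

DESCRIPTION_FILE = "export PIGLIT_REPLAY_DESCRIPTION_FILE='.*/install/(.*)'$"
DEVICE_NAME = "export PIGLIT_REPLAY_DEVICE_NAME='(.*)'$"

line = "export PIGLIT_REPLAY_DESCRIPTION_FILE='/builds/mesa/install/traces-example.yml'"
assert re.search(DESCRIPTION_FILE, line).group(1) == 'traces-example.yml'

line = "export PIGLIT_REPLAY_DEVICE_NAME='gl-example-device'"
assert re.search(DEVICE_NAME, line).group(1) == 'gl-example-device'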

View file

@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/update_traces_checksum.py" "$@"

View file

@ -45,26 +45,24 @@ def is_commit_valid(commit: str) -> bool:
return ret == 0
def branch_has_commit(upstream_branch: str, commit: str) -> bool:
def branch_has_commit(upstream: str, branch: str, commit: str) -> bool:
"""
Returns True if the commit is actually present in the branch
"""
ret = subprocess.call(['git', 'merge-base', '--is-ancestor',
commit, upstream_branch],
commit, upstream + '/' + branch],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return ret == 0
def branch_has_backport_of_commit(upstream_branch: str, commit: str) -> str:
def branch_has_backport_of_commit(upstream: str, branch: str, commit: str) -> str:
"""
Returns the commit hash if the commit has been backported to the branch,
or an empty string if is hasn't
"""
upstream, _ = upstream_branch.split('/', 1)
out = subprocess.check_output(['git', 'log', '--format=%H',
upstream + '..' + upstream_branch,
upstream + '..' + upstream + '/' + branch,
'--grep', 'cherry picked from commit ' + commit],
stderr=subprocess.DEVNULL)
return out.decode().strip()
@ -127,15 +125,17 @@ if __name__ == "__main__":
help='colorize output (default: true if stdout is a terminal)')
args = parser.parse_args()
if branch_has_commit(args.branch, args.commit):
print_(args, True, 'Commit ' + args.commit + ' is in branch ' + args.branch)
upstream, branch = args.branch.split('/', 1)
if branch_has_commit(upstream, branch, args.commit):
print_(args, True, 'Commit ' + args.commit + ' is in branch ' + branch)
exit(0)
backport = branch_has_backport_of_commit(args.branch, args.commit)
backport = branch_has_backport_of_commit(upstream, branch, args.commit)
if backport:
print_(args, True,
'Commit ' + args.commit + ' was backported to branch ' + args.branch + ' as commit ' + backport)
'Commit ' + args.commit + ' was backported to branch ' + branch + ' as commit ' + backport)
exit(0)
print_(args, False, 'Commit ' + args.commit + ' is NOT in branch ' + args.branch)
print_(args, False, 'Commit ' + args.commit + ' is NOT in branch ' + branch)
exit(1)
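
Both helpers are thin wrappers over git; a sketch of the equivalent invocations, using a hypothetical origin remote, a 20.1 branch and an abbreviated SHA:

import subprocess

# branch_has_commit(): exit code 0 means the commit is an ancestor of origin/20.1
subprocess.call(['git', 'merge-base', '--is-ancestor',
                 'abcdef123456', 'origin/20.1'])

# branch_has_backport_of_commit(): any hash printed here is a backport,
# found via the "cherry picked from commit ..." trailer
subprocess.check_output(['git', 'log', '--format=%H',
                         'origin..origin/20.1',
                         '--grep', 'cherry picked from commit abcdef123456'])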

View file

@ -88,31 +88,33 @@ def test_is_commit_valid(commit: str, expected: bool) -> None:
@pytest.mark.parametrize(
'branch, commit, expected',
[
(get_upstream() + '/20.1', '20.1-branchpoint', True),
(get_upstream() + '/20.1', '20.0', False),
(get_upstream() + '/20.1', 'main', False),
(get_upstream() + '/20.1', 'e58a10af640ba58b6001f5c5ad750b782547da76', True),
(get_upstream() + '/20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', True),
(get_upstream() + '/staging/20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', True),
(get_upstream() + '/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', False),
(get_upstream() + '/main', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', True),
(get_upstream() + '/20.0', 'd043d24654c851f0be57dbbf48274b5373dea42b', False),
('20.1', '20.1-branchpoint', True),
('20.1', '20.0', False),
('20.1', 'main', False),
('20.1', 'e58a10af640ba58b6001f5c5ad750b782547da76', True),
('20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', True),
('staging/20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', True),
('20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', False),
('main', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', True),
('20.0', 'd043d24654c851f0be57dbbf48274b5373dea42b', False),
])
def test_branch_has_commit(branch: str, commit: str, expected: bool) -> None:
assert branch_has_commit(branch, commit) == expected
upstream = get_upstream()
assert branch_has_commit(upstream, branch, commit) == expected
@pytest.mark.parametrize(
'branch, commit, expected',
[
(get_upstream() + '/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', 'd043d24654c851f0be57dbbf48274b5373dea42b'),
(get_upstream() + '/staging/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', 'd043d24654c851f0be57dbbf48274b5373dea42b'),
(get_upstream() + '/20.1', '20.1-branchpoint', ''),
(get_upstream() + '/20.1', '20.0', ''),
(get_upstream() + '/20.1', '20.2', 'abac4859618e02aea00f705b841a7c5c5007ad1a'),
(get_upstream() + '/20.1', 'main', ''),
(get_upstream() + '/20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', ''),
(get_upstream() + '/20.0', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', '8cd4f57381cefe69019a3282d457d5bda3644030'),
('20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', 'd043d24654c851f0be57dbbf48274b5373dea42b'),
('staging/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', 'd043d24654c851f0be57dbbf48274b5373dea42b'),
('20.1', '20.1-branchpoint', ''),
('20.1', '20.0', ''),
('20.1', '20.2', ''),
('20.1', 'main', ''),
('20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', ''),
('20.0', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', ''),
])
def test_branch_has_backport_of_commit(branch: str, commit: str, expected: bool) -> None:
assert branch_has_backport_of_commit(branch, commit) == expected
upstream = get_upstream()
assert branch_has_backport_of_commit(upstream, branch, commit) == expected

View file

@ -78,9 +78,9 @@ def commit(message: str) -> None:
def _calculate_release_start(major: str, minor: str) -> datetime.date:
"""Calculate the start of the release for release candidates.
"""Calclulate the start of the release for release candidates.
This is quarterly, on the second wednesday, in January, April, July, and October.
This is quarterly, on the second wednesday, in Januray, April, July, and Octobor.
"""
quarter = datetime.date.fromisoformat(f'20{major}-0{[1, 4, 7, 10][int(minor)]}-01')
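
The docstring pins the schedule to the second Wednesday of the quarter's first month; a minimal sketch of that date arithmetic, under the assumption that the helper advances from the first-of-the-month date computed above:

import datetime

def second_wednesday(quarter_start: datetime.date) -> datetime.date:
    # weekday(): Monday == 0 ... Wednesday == 2
    offset = (2 - quarter_start.weekday()) % 7
    first_wednesday = quarter_start + datetime.timedelta(days=offset)
    return first_wednesday + datetime.timedelta(days=7)

# January 2023: the first Wednesday is the 4th, so the release start is the 11th
assert second_wednesday(datetime.date(2023, 1, 1)) == datetime.date(2023, 1, 11)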

View file

@ -52,7 +52,7 @@ def mock_csv(data: typing.List[gen_calendar_entries.CalendarRowType]) -> typing.
@pytest.fixture(autouse=True, scope='module')
def disable_git_commits() -> None:
"""Mock out the commit function so no git commits are made during testing."""
"""Mock out the commit function so no git commits are made durring testing."""
with mock.patch('bin.gen_calendar_entries.commit', mock.Mock()):
yield

View file

@ -168,7 +168,6 @@ class Inliner(states.Inliner):
break
# Quote all original backslashes
checked = re.sub('\x00', "\\\x00", checked)
checked = re.sub('@', '\\@', checked)
return docutils.utils.unescape(checked, 1)
inliner = Inliner();
@ -218,10 +217,7 @@ async def parse_issues(commits: str) -> typing.List[str]:
async def gather_bugs(version: str) -> typing.List[str]:
commits = await gather_commits(version)
if commits:
issues = await parse_issues(commits)
else:
issues = []
issues = await parse_issues(commits)
loop = asyncio.get_event_loop()
async with aiohttp.ClientSession(loop=loop) as session:
@ -280,7 +276,7 @@ def calculate_next_version(version: str, is_point: bool) -> str:
def calculate_previous_version(version: str, is_point: bool) -> str:
"""Calculate the previous version to compare to.
In the case of -rc to final that version is the previous .0 release,
In the case of -rc to final that verison is the previous .0 release,
(19.3.0 in the case of 20.0.0, for example). for point releases that is
the last point release. This value will be the same as the input value
for a point release, but different for a major release.
@ -299,7 +295,7 @@ def calculate_previous_version(version: str, is_point: bool) -> str:
def get_features(is_point_release: bool) -> typing.Generator[str, None, None]:
p = pathlib.Path('docs') / 'relnotes' / 'new_features.txt'
p = pathlib.Path(__file__).parent.parent / 'docs' / 'relnotes' / 'new_features.txt'
if p.exists() and p.stat().st_size > 0:
if is_point_release:
print("WARNING: new features being introduced in a point release", file=sys.stderr)
@ -307,7 +303,6 @@ def get_features(is_point_release: bool) -> typing.Generator[str, None, None]:
for line in f:
yield line.rstrip()
p.unlink()
subprocess.run(['git', 'add', p])
else:
yield "None"
@ -325,13 +320,12 @@ def update_release_notes_index(version: str) -> None:
if first_list and line.startswith('-'):
first_list = False
new_relnotes.append(f'- :doc:`{version} release notes <relnotes/{version}>`\n')
if (not first_list and second_list and
re.match(r' \d+.\d+(.\d+)? <relnotes/\d+.\d+(.\d+)?>', line)):
if not first_list and second_list and line.startswith(' relnotes/'):
second_list = False
new_relnotes.append(f' {version} <relnotes/{version}>\n')
new_relnotes.append(f' relnotes/{version}\n')
new_relnotes.append(line)
with relnotes_index_path.open('w', encoding='utf-8') as f:
with relnotes_index_path.open('w') as f:
for line in new_relnotes:
f.write(line)
@ -339,7 +333,7 @@ def update_release_notes_index(version: str) -> None:
async def main() -> None:
v = pathlib.Path('VERSION')
v = pathlib.Path(__file__).parent.parent / 'VERSION'
with v.open('rt') as f:
raw_version = f.read().strip()
is_point_release = '-rc' not in raw_version
@ -356,8 +350,8 @@ async def main() -> None:
gather_bugs(previous_version),
)
final = pathlib.Path('docs') / 'relnotes' / f'{this_version}.rst'
with final.open('wt', encoding='utf-8') as f:
final = pathlib.Path(__file__).parent.parent / 'docs' / 'relnotes' / f'{this_version}.rst'
with final.open('wt') as f:
try:
f.write(TEMPLATE.render(
bugfix=is_point_release,
@ -374,7 +368,6 @@ async def main() -> None:
))
except:
print(exceptions.text_error_template().render())
return
subprocess.run(['git', 'add', final])

View file

@ -76,7 +76,7 @@ async def test_gather_commits():
'content, bugs',
[
# It is important to have the title on a new line, as
# textwrap.dedent won't work otherwise.
# textwrap.dedent wont work otherwise.
# Test the `Closes: #N` syntax
(
@ -113,7 +113,7 @@ async def test_gather_commits():
'''\
A commit for for something else completely
Closes: https://github.com/Organization/project/1234
Closes: https://github.com/Organiztion/project/1234
''',
[],
),
@ -198,8 +198,3 @@ async def test_parse_issues(content: str, bugs: typing.List[str]) -> None:
mock.patch('bin.gen_release_notes.gather_commits', mock.AsyncMock(return_value='sha\n')):
ids = await parse_issues('1234 not used')
assert set(ids) == set(bugs)
@pytest.mark.asyncio
async def test_rst_escape():
out = inliner.quoteInline('foo@bar')
assert out == 'foo\@bar'

View file

@ -89,8 +89,8 @@ python ./bin/gen_vs_module_defs.py --in_file src/gallium/targets/lavapipe/vulkan
'''
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=gen_help)
parser.add_argument('--in_file', help='input template module definition file')
parser.add_argument('--out_file', help='output module definition file')
parser.add_argument('--in_file', help='input template moudle definition file')
parser.add_argument('--out_file', help='output moudle definition file')
parser.add_argument('--compiler_abi', help='compiler abi')
parser.add_argument('--compiler_id', help='compiler id')
parser.add_argument('--cpu_family', help='cpu family')

View file

@ -118,36 +118,35 @@ SOURCES = [
'api': 'opencl',
'inc_folder': 'CL',
'sources': [
Source('include/CL/opencl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/opencl.h'),
Source('include/CL/cl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl.h'),
Source('include/CL/cl_platform.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_platform.h'),
Source('include/CL/cl_gl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_gl.h'),
Source('include/CL/cl_gl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_gl_ext.h'),
Source('include/CL/cl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_ext.h'),
Source('include/CL/cl_version.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_version.h'),
Source('include/CL/cl_icd.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_icd.h'),
Source('include/CL/cl_egl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_egl.h'),
Source('include/CL/cl_d3d10.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_d3d10.h'),
Source('include/CL/cl_d3d11.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_d3d11.h'),
Source('include/CL/cl_dx9_media_sharing.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_dx9_media_sharing.h'),
Source('include/CL/cl_dx9_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_dx9_media_sharing_intel.h'),
Source('include/CL/cl_ext_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_ext_intel.h'),
Source('include/CL/cl_va_api_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_va_api_media_sharing_intel.h'),
Source('include/CL/opencl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/opencl.h'),
Source('include/CL/cl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl.h'),
Source('include/CL/cl_platform.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_platform.h'),
Source('include/CL/cl_gl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_gl.h'),
Source('include/CL/cl_gl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_gl_ext.h'),
Source('include/CL/cl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_ext.h'),
Source('include/CL/cl_version.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_version.h'),
Source('include/CL/cl_icd.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_icd.h'),
Source('include/CL/cl_egl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_egl.h'),
Source('include/CL/cl_d3d10.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_d3d10.h'),
Source('include/CL/cl_d3d11.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_d3d11.h'),
Source('include/CL/cl_dx9_media_sharing.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_dx9_media_sharing.h'),
Source('include/CL/cl_dx9_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_dx9_media_sharing_intel.h'),
Source('include/CL/cl_ext_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_ext_intel.h'),
Source('include/CL/cl_va_api_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_va_api_media_sharing_intel.h'),
Source('include/CL/cl.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/5f3cc41df821a3e5988490232082a3e3b82c0283/include/CL/cl.hpp'),
Source('include/CL/cl2.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/main/include/CL/cl2.hpp'),
Source('include/CL/opencl.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/main/include/CL/opencl.hpp'),
Source('include/CL/cl.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/master/include/CL/cl.hpp'),
Source('include/CL/cl2.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/master/include/CL/cl2.hpp'),
],
},
{
'api': 'spirv',
'sources': [
Source('src/compiler/spirv/spirv.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/spirv.h'),
Source('src/compiler/spirv/spirv.core.grammar.json', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/spirv.core.grammar.json'),
Source('src/compiler/spirv/OpenCL.std.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/OpenCL.std.h'),
Source('src/compiler/spirv/GLSL.std.450.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/GLSL.std.450.h'),
Source('src/compiler/spirv/GLSL.ext.AMD.h', 'https://github.com/KhronosGroup/glslang/raw/main/SPIRV/GLSL.ext.AMD.h'), # FIXME: is this the canonical source?
Source('src/compiler/spirv/spirv.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/spirv.h'),
Source('src/compiler/spirv/spirv.core.grammar.json', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/spirv.core.grammar.json'),
Source('src/compiler/spirv/OpenCL.std.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/OpenCL.std.h'),
Source('src/compiler/spirv/GLSL.std.450.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/GLSL.std.450.h'),
Source('src/compiler/spirv/GLSL.ext.AMD.h', 'https://github.com/KhronosGroup/glslang/raw/master/SPIRV/GLSL.ext.AMD.h'), # FIXME: is this the canonical source?
],
},

bin/meson-options.py Executable file
View file

@ -0,0 +1,63 @@
#!/usr/bin/env python3
from os import get_terminal_size
from textwrap import wrap
from mesonbuild import coredata
from mesonbuild import optinterpreter
(COLUMNS, _) = get_terminal_size()
def describe_option(option_name: str, option_default_value: str,
option_type: str, option_message: str) -> None:
print('name: ' + option_name)
print('default: ' + option_default_value)
print('type: ' + option_type)
for line in wrap(option_message, width=COLUMNS - 9):
print(' ' + line)
print('---')
oi = optinterpreter.OptionInterpreter('')
oi.process('meson_options.txt')
for (name, value) in oi.options.items():
if isinstance(value, coredata.UserStringOption):
describe_option(name,
value.value,
'string',
"You can type what you want, but make sure it makes sense")
elif isinstance(value, coredata.UserBooleanOption):
describe_option(name,
'true' if value.value else 'false',
'boolean',
"You can set it to 'true' or 'false'")
elif isinstance(value, coredata.UserIntegerOption):
describe_option(name,
str(value.value),
'integer',
"You can set it to any integer value between '{}' and '{}'".format(value.min_value, value.max_value))
elif isinstance(value, coredata.UserUmaskOption):
describe_option(name,
str(value.value),
'umask',
"You can set it to 'preserve' or a value between '0000' and '0777'")
elif isinstance(value, coredata.UserComboOption):
choices = '[' + ', '.join(["'" + v + "'" for v in value.choices]) + ']'
describe_option(name,
value.value,
'combo',
"You can set it to any one of those values: " + choices)
elif isinstance(value, coredata.UserArrayOption):
choices = '[' + ', '.join(["'" + v + "'" for v in value.choices]) + ']'
value = '[' + ', '.join(["'" + v + "'" for v in value.value]) + ']'
describe_option(name,
value,
'array',
"You can set it to one or more of those values: " + choices)
elif isinstance(value, coredata.UserFeatureOption):
describe_option(name,
value.value,
'feature',
"You can set it to 'auto', 'enabled', or 'disabled'")
else:
print(name + ' is an option of a type unknown to this script')
print('---')

View file

@ -1,4 +1,6 @@
# Copyright © 2023 Collabora
#!/usr/bin/env python3
# encoding=utf-8
# Copyright © 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@ -18,22 +20,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
libpankmod_lib_files = files(
'pan_kmod.c',
'panfrost_kmod.c',
)
import os
libpankmod_lib = static_library(
'pankmod_lib',
[libpankmod_lib_files],
include_directories : [inc_include, inc_src, inc_panfrost],
c_args : [no_override_init_args],
gnu_symbol_visibility : 'hidden',
dependencies: [dep_libdrm, idep_mesautil],
build_by_default : false,
)
libpankmod_dep = declare_dependency(
include_directories: [inc_include, inc_src],
dependencies: [dep_libdrm],
)
def main():
filename = os.path.join(os.environ['MESON_SOURCE_ROOT'], 'VERSION')
with open(filename) as f:
version = f.read().strip()
print(version, end='')
if __name__ == '__main__':
main()

View file

@ -25,7 +25,7 @@
"""Perf annotate for JIT code.
Linux `perf annotate` does not work with JIT code. This script takes the data
produced by `perf script` command, plus the diassemblies outputted by gallivm
produced by `perf script` command, plus the diassemblies outputed by gallivm
into /tmp/perf-XXXXX.map.asm and produces output similar to `perf annotate`.
See docs/llvmpipe.rst for usage instructions.

View file

@ -27,7 +27,7 @@ from pick.ui import UI, PALETTE
if __name__ == "__main__":
u = UI()
evl = urwid.AsyncioEventLoop(loop=asyncio.new_event_loop())
evl = urwid.AsyncioEventLoop(loop=asyncio.get_event_loop())
loop = urwid.MainLoop(u.render(), PALETTE, event_loop=evl, handle_mouse=False)
u.mainloop = loop
loop.run()

View file

@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/python-venv.sh" \
"$this_dir/pick/requirements.txt" \
"$this_dir/pick-ui.py" "$@"

View file

@ -40,19 +40,16 @@ if typing.TYPE_CHECKING:
sha: str
description: str
nominated: bool
nomination_type: int
nomination_type: typing.Optional[int]
resolution: typing.Optional[int]
main_sha: typing.Optional[str]
because_sha: typing.Optional[str]
notes: typing.Optional[str] = attr.ib(None)
IS_FIX = re.compile(r'^\s*fixes:\s*([a-f0-9]{6,40})', flags=re.MULTILINE | re.IGNORECASE)
# FIXME: I dislike the duplication in this regex, but I couldn't get it to work otherwise
IS_CC = re.compile(r'^\s*cc:\s*["\']?([0-9]{2}\.[0-9])?["\']?\s*["\']?([0-9]{2}\.[0-9])?["\']?\s*\<?mesa-stable',
flags=re.MULTILINE | re.IGNORECASE)
IS_REVERT = re.compile(r'This reverts commit ([0-9a-f]{40})')
IS_BACKPORT = re.compile(r'^\s*backport-to:\s*(\d{2}\.\d),?\s*(\d{2}\.\d)?',
flags=re.MULTILINE | re.IGNORECASE)
# XXX: hack
SEM = asyncio.Semaphore(50)
@ -74,8 +71,6 @@ class NominationType(enum.Enum):
CC = 0
FIXES = 1
REVERT = 2
NONE = 3
BACKPORT = 4
@enum.unique
@ -121,24 +116,24 @@ class Commit:
sha: str = attr.ib()
description: str = attr.ib()
nominated: bool = attr.ib(False)
nomination_type: NominationType = attr.ib(NominationType.NONE)
nomination_type: typing.Optional[NominationType] = attr.ib(None)
resolution: Resolution = attr.ib(Resolution.UNRESOLVED)
main_sha: typing.Optional[str] = attr.ib(None)
because_sha: typing.Optional[str] = attr.ib(None)
notes: typing.Optional[str] = attr.ib(None)
def to_json(self) -> 'CommitDict':
d: typing.Dict[str, typing.Any] = attr.asdict(self)
d['nomination_type'] = self.nomination_type.value
if self.nomination_type is not None:
d['nomination_type'] = self.nomination_type.value
if self.resolution is not None:
d['resolution'] = self.resolution.value
return typing.cast('CommitDict', d)
@classmethod
def from_json(cls, data: 'CommitDict') -> 'Commit':
c = cls(data['sha'], data['description'], data['nominated'], main_sha=data['main_sha'],
because_sha=data['because_sha'], notes=data['notes'])
c.nomination_type = NominationType(data['nomination_type'])
c = cls(data['sha'], data['description'], data['nominated'], main_sha=data['main_sha'], because_sha=data['because_sha'])
if data['nomination_type'] is not None:
c.nomination_type = NominationType(data['nomination_type'])
if data['resolution'] is not None:
c.resolution = Resolution(data['resolution'])
return c
@ -207,14 +202,6 @@ class Commit:
assert v
await ui.feedback(f'{self.sha} ({self.description}) committed successfully')
async def update_notes(self, ui: 'UI', notes: typing.Optional[str]) -> None:
self.notes = notes
async with ui.git_lock:
ui.save()
v = await commit_state(message=f'Updates notes for {self.sha}')
assert v
await ui.feedback(f'{self.sha} ({self.description}) notes updated successfully')
async def get_new_commits(sha: str) -> typing.List[typing.Tuple[str, str]]:
# Try to get the authoritative upstream main
@ -279,11 +266,13 @@ async def resolve_nomination(commit: 'Commit', version: str) -> 'Commit':
out = _out.decode()
# We give precedence to fixes and cc tags over revert tags.
if fix_for_commit := IS_FIX.search(out):
# XXX: not having the walrus operator available makes me sad :=
m = IS_FIX.search(out)
if m:
# We set the nomination_type and because_sha here so that we can later
# check to see if this fixes another staged commit.
try:
commit.because_sha = fixed = await full_sha(fix_for_commit.group(1))
commit.because_sha = fixed = await full_sha(m.group(1))
except PickUIException:
pass
else:
@ -292,22 +281,18 @@ async def resolve_nomination(commit: 'Commit', version: str) -> 'Commit':
commit.nominated = True
return commit
if backport_to := IS_BACKPORT.search(out):
if version in backport_to.groups():
commit.nominated = True
commit.nomination_type = NominationType.BACKPORT
return commit
if cc_to := IS_CC.search(out):
if cc_to.groups() == (None, None) or version in cc_to.groups():
m = IS_CC.search(out)
if m:
if m.groups() == (None, None) or version in m.groups():
commit.nominated = True
commit.nomination_type = NominationType.CC
return commit
if revert_of := IS_REVERT.search(out):
m = IS_REVERT.search(out)
if m:
# See comment for IS_FIX path
try:
commit.because_sha = reverted = await full_sha(revert_of.group(1))
commit.because_sha = reverted = await full_sha(m.group(1))
except PickUIException:
pass
else:

View file

@ -94,9 +94,9 @@ class TestRE:
Reviewed-by: Jonathan Marek <jonathan@marek.ca>
""")
fix_for_commit = core.IS_FIX.search(message)
assert fix_for_commit is not None
assert fix_for_commit.group(1) == '3d09bb390a39'
m = core.IS_FIX.search(message)
assert m is not None
assert m.group(1) == '3d09bb390a39'
class TestCC:
@ -114,9 +114,9 @@ class TestRE:
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '19.2'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '19.2'
def test_multiple_branches(self):
"""Tests commit with more than one branch specified"""
@ -130,10 +130,10 @@ class TestRE:
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '19.1'
assert cc_to.group(2) == '19.2'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '19.1'
assert m.group(2) == '19.2'
def test_no_branch(self):
"""Tests commit with no branch specification"""
@ -148,8 +148,8 @@ class TestRE:
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
m = core.IS_CC.search(message)
assert m is not None
def test_quotes(self):
"""Tests commit with quotes around the versions"""
@ -162,9 +162,9 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
def test_multiple_quotes(self):
"""Tests commit with quotes around the versions"""
@ -177,10 +177,10 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
assert cc_to.group(2) == '20.1'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
assert m.group(2) == '20.1'
def test_single_quotes(self):
"""Tests commit with quotes around the versions"""
@ -193,9 +193,9 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
def test_multiple_single_quotes(self):
"""Tests commit with quotes around the versions"""
@ -208,10 +208,10 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
assert cc_to.group(2) == '20.1'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
assert m.group(2) == '20.1'
class TestRevert:
@ -232,61 +232,9 @@ class TestRE:
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
""")
revert_of = core.IS_REVERT.search(message)
assert revert_of is not None
assert revert_of.group(1) == '2ca8629fa9b303e24783b76a7b3b0c2513e32fbd'
class TestBackportTo:
def test_single_release(self):
"""Tests commit meant for a single branch, ie, 19.1"""
message = textwrap.dedent("""\
radv: fix DCC fast clear code for intensity formats
This fixes a rendering issue with DiRT 4 on GFX10. Only GFX10 was
affected because intensity formats are different.
Backport-to: 19.2
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1923
Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
""")
backport_to = core.IS_BACKPORT.search(message)
assert backport_to is not None
assert backport_to.groups() == ('19.2', None)
def test_multiple_release_space(self):
"""Tests commit with more than one branch specified"""
message = textwrap.dedent("""\
radeonsi: enable zerovram for Rocket League
Fixes corruption on game startup.
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1888
Backport-to: 19.1 19.2
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
""")
backport_to = core.IS_BACKPORT.search(message)
assert backport_to is not None
assert backport_to.groups() == ('19.1', '19.2')
def test_multiple_release_comma(self):
"""Tests commit with more than one branch specified"""
message = textwrap.dedent("""\
radeonsi: enable zerovram for Rocket League
Fixes corruption on game startup.
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1888
Backport-to: 19.1, 19.2
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
""")
backport_to = core.IS_BACKPORT.search(message)
assert backport_to is not None
assert backport_to.groups() == ('19.1', '19.2')
m = core.IS_REVERT.search(message)
assert m is not None
assert m.group(1) == '2ca8629fa9b303e24783b76a7b3b0c2513e32fbd'
class TestResolveNomination:
@ -294,7 +242,7 @@ class TestResolveNomination:
@attr.s(slots=True)
class FakeSubprocess:
"""A fake asyncio.subprocess like class for use with mock."""
"""A fake asyncio.subprocess like classe for use with mock."""
out: typing.Optional[bytes] = attr.ib(None)
returncode: int = attr.ib(0)
@ -375,28 +323,6 @@ class TestResolveNomination:
assert not c.nominated
assert c.nomination_type is None
@pytest.mark.asyncio
async def test_backport_is_nominated(self):
s = self.FakeSubprocess(b'Backport-to: 16.2')
c = core.Commit('abcdef1234567890', 'a commit')
with mock.patch('bin.pick.core.asyncio.create_subprocess_exec', s.mock):
await core.resolve_nomination(c, '16.2')
assert c.nominated
assert c.nomination_type is core.NominationType.BACKPORT
@pytest.mark.asyncio
async def test_backport_is_not_nominated(self):
s = self.FakeSubprocess(b'Backport-to: 16.2')
c = core.Commit('abcdef1234567890', 'a commit')
with mock.patch('bin.pick.core.asyncio.create_subprocess_exec', s.mock):
await core.resolve_nomination(c, '16.1')
assert not c.nominated
assert c.nomination_type is None
@pytest.mark.asyncio
async def test_revert_is_nominated(self):
s = self.FakeSubprocess(b'This reverts commit 1234567890123456789012345678901234567890.')
@ -421,21 +347,6 @@ class TestResolveNomination:
assert not c.nominated
assert c.nomination_type is core.NominationType.REVERT
@pytest.mark.asyncio
async def test_is_fix_and_backport(self):
s = self.FakeSubprocess(
b'Fixes: 3d09bb390a39 (etnaviv: GC7000: State changes for HALTI3..5)\n'
b'Backport-to: 16.1'
)
c = core.Commit('abcdef1234567890', 'a commit')
with mock.patch('bin.pick.core.asyncio.create_subprocess_exec', s.mock):
with mock.patch('bin.pick.core.is_commit_in_branch', self.return_true):
await core.resolve_nomination(c, '16.1')
assert c.nominated
assert c.nomination_type is core.NominationType.FIXES
@pytest.mark.asyncio
async def test_is_fix_and_cc(self):
s = self.FakeSubprocess(

View file

@ -1,2 +0,0 @@
attrs==23.1.0
urwid==2.1.2

View file

@ -47,13 +47,6 @@ class RootWidget(urwid.Frame):
super().__init__(*args, **kwargs)
self.ui = ui
class CommitList(urwid.ListBox):
def __init__(self, *args, ui: 'UI', **kwargs):
super().__init__(*args, **kwargs)
self.ui = ui
def keypress(self, size: int, key: str) -> typing.Optional[str]:
if key == 'q':
raise urwid.ExitMainLoop()
@ -108,23 +101,6 @@ class CommitWidget(urwid.Text):
return None
class FocusAwareEdit(urwid.Edit):
"""An Edit type that signals when it comes into and leaves focus."""
signals = urwid.Edit.signals + ['focus_changed']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__is_focus = False
def render(self, size: typing.Tuple[int], focus: bool = False) -> urwid.Canvas:
if focus != self.__is_focus:
self._emit("focus_changed", focus)
self.__is_focus = focus
return super().render(size, focus)
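# Standalone sketch of consuming the 'focus_changed' signal emitted above
# (save_notes is a hypothetical callback; the real handling lives in
# UI._change_notes_focus_cb below).
def save_notes(widget: 'FocusAwareEdit', focus: bool) -> None:
    if not focus:
        print('left focus with text:', widget.get_edit_text())
notes_sketch = FocusAwareEdit('', multiline=True)
urwid.connect_signal(notes_sketch, 'focus_changed', save_notes)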
@attr.s(slots=True)
class UI:
@ -136,7 +112,6 @@ class UI:
commit_list: typing.List['urwid.Button'] = attr.ib(factory=lambda: urwid.SimpleFocusListWalker([]), init=False)
feedback_box: typing.List['urwid.Text'] = attr.ib(factory=lambda: urwid.SimpleFocusListWalker([]), init=False)
notes: 'FocusAwareEdit' = attr.ib(factory=lambda: FocusAwareEdit('', multiline=True), init=False)
header: 'urwid.Text' = attr.ib(factory=lambda: urwid.Text('Mesa Stable Picker', align='center'), init=False)
body: 'urwid.Columns' = attr.ib(attr.Factory(lambda s: s._make_body(), True), init=False)
footer: 'urwid.Columns' = attr.ib(attr.Factory(lambda s: s._make_footer(), True), init=False)
@ -147,36 +122,10 @@ class UI:
new_commits: typing.List['core.Commit'] = attr.ib(factory=list, init=False)
git_lock: asyncio.Lock = attr.ib(factory=asyncio.Lock, init=False)
def _get_current_commit(self) -> typing.Optional['core.Commit']:
entry = self.commit_list.get_focus()[0]
return entry.original_widget.commit if entry is not None else None
def _change_notes_cb(self) -> None:
commit = self._get_current_commit()
if commit and commit.notes:
self.notes.set_edit_text(commit.notes)
else:
self.notes.set_edit_text('')
def _change_notes_focus_cb(self, notes: 'FocusAwareEdit', focus: 'bool') -> 'None':
# in the case of coming into focus we don't want to do anything
if focus:
return
commit = self._get_current_commit()
if commit is None:
return
text: str = notes.get_edit_text()
if text != commit.notes:
asyncio.ensure_future(commit.update_notes(self, text))
def _make_body(self) -> 'urwid.Columns':
commits = CommitList(self.commit_list, ui=self)
commits = urwid.ListBox(self.commit_list)
feedback = urwid.ListBox(self.feedback_box)
urwid.connect_signal(self.commit_list, 'modified', self._change_notes_cb)
notes = urwid.Filler(self.notes)
urwid.connect_signal(self.notes, 'focus_changed', self._change_notes_focus_cb)
return urwid.Columns([urwid.LineBox(commits), urwid.Pile([urwid.LineBox(notes), urwid.LineBox(feedback)])])
return urwid.Columns([commits, feedback])
def _make_footer(self) -> 'urwid.Columns':
body = [
@ -185,12 +134,12 @@ class UI:
urwid.Text('[C]herry Pick'),
urwid.Text('[D]enominate'),
urwid.Text('[B]ackport'),
urwid.Text('[A]pply additional patch'),
urwid.Text('[A]pply additional patch')
]
return urwid.Columns(body)
def _make_root(self) -> 'RootWidget':
return RootWidget(self.body, urwid.LineBox(self.header), urwid.LineBox(self.footer), 'body', ui=self)
return RootWidget(self.body, self.header, self.footer, 'body', ui=self)
def render(self) -> 'WidgetType':
asyncio.ensure_future(self.update())

View file

@ -1,47 +0,0 @@
#!/usr/bin/env bash
set -eu
readonly requirements_file=$1
shift
venv_dir="$(dirname "$requirements_file")"/.venv
readonly venv_dir
readonly venv_req=$venv_dir/requirements.txt
readonly venv_python_version=$venv_dir/python-version.txt
if [ -d "$venv_dir" ]
then
if [ ! -r "$venv_python_version" ]
then
echo "Python environment predates Python version checks."
echo "It might be invalid and needs to be regenerated."
rm -rf "$venv_dir"
elif ! cmp --quiet <(python --version) "$venv_python_version"
then
old=$(cat "$venv_python_version")
new=$(python --version)
echo "Python version has changed ($old -> $new)."
echo "Python environment needs to be regenerated."
unset old new
rm -rf "$venv_dir"
fi
fi
if ! [ -r "$venv_dir/bin/activate" ]
then
echo "Creating Python environment..."
python -m venv "$venv_dir"
python --version > "$venv_python_version"
fi
# shellcheck disable=1091
source "$venv_dir/bin/activate"
if ! cmp --quiet "$requirements_file" "$venv_req"
then
echo "$(realpath --relative-to="$PWD" "$requirements_file") has changed, re-installing..."
pip --disable-pip-version-check install --requirement "$requirements_file"
cp "$requirements_file" "$venv_req"
fi
python "$@"

View file

@ -7,41 +7,12 @@ import subprocess
# This list contains symbols that _might_ be exported for some platforms
PLATFORM_SYMBOLS = [
'_GLOBAL_OFFSET_TABLE_',
'__bss_end__',
'__bss_start__',
'__bss_start',
'__cxa_guard_abort',
'__cxa_guard_acquire',
'__cxa_guard_release',
'__cxa_allocate_dependent_exception',
'__cxa_allocate_exception',
'__cxa_begin_catch',
'__cxa_call_unexpected',
'__cxa_current_exception_type',
'__cxa_current_primary_exception',
'__cxa_decrement_exception_refcount',
'__cxa_deleted_virtual',
'__cxa_demangle',
'__cxa_end_catch',
'__cxa_free_dependent_exception',
'__cxa_free_exception',
'__cxa_get_exception_ptr',
'__cxa_get_globals',
'__cxa_get_globals_fast',
'__cxa_increment_exception_refcount',
'__cxa_new_handler',
'__cxa_pure_virtual',
'__cxa_rethrow',
'__cxa_rethrow_primary_exception',
'__cxa_terminate_handler',
'__cxa_throw',
'__cxa_uncaught_exception',
'__cxa_uncaught_exceptions',
'__cxa_unexpected_handler',
'__dynamic_cast',
'__emutls_get_address',
'__gxx_personality_v0',
'__end__',
'__odr_asan._glapi_Context',
'__odr_asan._glapi_Dispatch',
@ -69,7 +40,7 @@ def get_symbols_nm(nm, lib):
if len(fields) == 2 or fields[1] == 'U':
continue
symbol_name = fields[0]
if platform_name == 'Linux' or platform_name == 'GNU' or platform_name.startswith('GNU/'):
if platform_name == 'Linux':
if symbol_name in PLATFORM_SYMBOLS:
continue
elif platform_name == 'Darwin':
@ -190,7 +161,7 @@ def main():
continue
if symbol[:2] == '_Z':
# As ajax found out, the compiler intentionally exports symbols
# that we explicitly asked it not to export, and we can't do
# that we explicitly asked it not to export, and we can't do
# anything about it:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36022#c4
continue
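# Standalone illustration (hypothetical "nm -P"-style lines; the exact format
# is an assumption) of the filtering in get_symbols_nm() above: two-field
# lines and undefined ("U") symbols are dropped, and known platform symbols
# are ignored.
for line in ('glXGetProcAddress T 1234 40',
             'malloc U',
             '_GLOBAL_OFFSET_TABLE_ A 0 0'):
    fields = line.split()
    if len(fields) == 2 or fields[1] == 'U':
        continue
    if fields[0] in PLATFORM_SYMBOLS:
        continue
    print(fields[0])  # only glXGetProcAddress survives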

View file

@ -81,7 +81,7 @@ Additions to Chapter 8 of the GLES 3.2 Specification (Textures and Samplers)
BGRA_EXT B, G, R, A Color
Add to table 8.9 (Effective internal format corresponding to
Add to table 8.9 (Effective internal format corresponding to
external format).
Format Type Effective

View file

@ -12,7 +12,7 @@ Contact
Status
Obsolete.
Not shipping.
Version
@ -70,7 +70,7 @@ Changes to Chapter 2 of the GLX 1.3 Specification (Functions and Errors)
In addition, an indirect rendering context can be current for
only one thread at a time. A direct rendering context may be
current to multiple threads, with synchronization of access to
the context through the GL managed by the application through
the context through the GL managed by the application through
mutexes.
Changes to Chapter 3 of the GLX 1.3 Specification (Functions and Errors)

View file

@ -360,7 +360,7 @@ Revision History
Version 4, 2013/02/01 - Add issue #12 regarding texture / renderbuffer
format queries.
Version 5, 2013/02/14 - Add issues #13 and #14 regarding simpler queries
Version 5, 2013/02/14 - Add issues #13 and #14 regarding simpler queries
after the context is created and made current.
Add issue #15 regarding the string query.
Add issue #16 regarding the value type returned

View file

@ -46,7 +46,7 @@ Overview
GL_ARB_gpu_shader5 extends GLSL in a number of useful ways. Much of this
added functionality requires significant hardware support. There are many
aspects, however, that can be easily implemented on any GPU with "real"
aspects, however, that can be easily implemented on any GPU with "real"
integer support (as opposed to simulating integers using floating point
calculations).

View file

@ -12,7 +12,7 @@ Contact
Status
Obsolete.
Deployed in DRI drivers post-XFree86 4.3.
Version

View file

@ -51,7 +51,7 @@ Overview
monitor. The screen surface can be scrolled by changing this origin.
This extension also defines functions for controlling the monitor's
display mode (width, height, refresh rate, etc), and specifying which
display mode (width, height, refresh rate, etc), and specifying which
screen surface is to be displayed on a monitor.
The new EGLModeMESA type and related functions are very similar to the

View file

@ -64,7 +64,7 @@ Issues
that enjoys privileged access, or that they do not wish to separate
the tracing code from their driver code base.
(2) Should the Trace API explicitly support the notion of "frames?
(2) Should the Trace API explicitly support the notion of "frames?
This would require hooking into glXSwapBuffers calls as well.
RESOLVED: No. The application can use NewTraceMESA/EndTraceMESA
@ -93,7 +93,7 @@ Issues
be considered persistent state?
RESOLVED: No. The implementation is not forced to use this information
on subsequent occurrences of name/pointer, and is free to consider it
on subsequent occurrences of name/pointer, and is free to consider it
transient state.
(5) Should comment commands be prohibited between Begin/End?
@ -218,7 +218,7 @@ Additions to Chapter 5 of the OpenGL 1.2.1 Specification (Special Functions)
Bitmap and DrawPixels commands.
TRACE_ERRORS_BIT_MESA controls logging of all errors. If this bit is
set, GetError will be executed wherever applicable, and the result will
set, GetError will be executed wherever applicable, and the result will
be added to the trace as a comment. The error returns are cached and
returned to the application on its GetError calls. If the user does not
wish the additional GetError calls to be performed, this bit should not

View file

@ -73,9 +73,6 @@ GL_MESA_tile_raster_order
GL_MESA_framebuffer_flip_y
GL_FRAMEBUFFER_FLIP_Y_MESA 0x8BBB
GL_MESA_texture_const_bandwidth
GL_CONST_BW_TILING_MESA 0x8BBE
EGL_MESA_drm_image
EGL_DRM_BUFFER_FORMAT_MESA 0x31D0
EGL_DRM_BUFFER_USE_MESA 0x31D1

View file

@ -1,129 +0,0 @@
# BSD 3-Clause License
#
# Copyright (c) 2018, pandas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Based on https://github.com/pydata/pydata-sphinx-theme
from docutils import nodes
import sphinx
from sphinx.ext.autosummary import autosummary_table
from sphinx.locale import admonitionlabels
import types
class BootstrapHTML5TranslatorMixin:
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
self.settings.table_style = "table"
def starttag(self, *args, **kwargs):
"""ensure an aria-level is set for any heading role"""
if kwargs.get("ROLE") == "heading" and "ARIA-LEVEL" not in kwargs:
kwargs["ARIA-LEVEL"] = "2"
return super().starttag(*args, **kwargs)
def visit_admonition(self, node, name: str = '') -> None:
admonitionclasses = {
'attention': 'alert-primary',
'caution': 'alert-secondary',
'danger': 'alert-danger',
'error': 'alert-danger',
'hint': 'alert-secondary',
'important': 'alert-primary',
'note': 'alert-info',
'seealso': 'alert-info',
'tip': 'alert-info',
'warning': 'alert-warning',
}
self.body.append(self.starttag(
node, 'div', CLASS=('alert ' + admonitionclasses[name])))
if name:
self.body.append(
self.starttag(node, 'div', '', CLASS='h5'))
self.body.append(str(admonitionlabels[name]))
self.body.append('</div>')
def visit_table(self, node):
# init the attributes
atts = {}
self._table_row_indices.append(0)
# get the classes
classes = [cls.strip(" \t\n") for cls in self.settings.table_style.split(",")]
# we're looking at the 'real_table', which is wrapped by an autosummary
if isinstance(node.parent, autosummary_table):
classes += ["autosummary"]
# add the width if set in a style attribute
if "width" in node:
atts["style"] = f'width: {node["width"]}'
# add specific class if align is set
if "align" in node:
classes.append(f'table-{node["align"]}')
tag = self.starttag(node, "table", CLASS=" ".join(classes), **atts)
self.body.append(tag)
def setup_translators(app):
if app.builder.default_translator_class is None:
return
if not app.registry.translators.items():
translator = types.new_class(
"BootstrapHTML5Translator",
(
BootstrapHTML5TranslatorMixin,
app.builder.default_translator_class,
),
{},
)
app.set_translator(app.builder.name, translator, override=True)
else:
for name, klass in app.registry.translators.items():
if app.builder.format != "html":
# Skip translators that are not HTML
continue
translator = types.new_class(
"BootstrapHTML5Translator",
(
BootstrapHTML5TranslatorMixin,
klass,
),
{},
)
app.set_translator(name, translator, override=True)
def setup(app):
app.connect("builder-inited", setup_translators)
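# Standalone illustration of the types.new_class() composition performed in
# setup_translators() above (DefaultTranslator is a stand-in for
# app.builder.default_translator_class): the mixin ends up first in the MRO,
# so its visit_* overrides win.
class DefaultTranslator:
    pass
_Translator = types.new_class(
    "BootstrapHTML5Translator",
    (BootstrapHTML5TranslatorMixin, DefaultTranslator),
    {},
)
assert _Translator.__mro__[1] is BootstrapHTML5TranslatorMixin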

View file

@ -6,8 +6,17 @@
import docutils.nodes
import sphinx.addnodes
from sphinx.util.nodes import split_explicit_title
from docutils import nodes, utils
def parse_envvar(env, sig, signode):
envvar, t, default = sig.split(" ", 2)
envvar = envvar.strip().upper()
t = "Type: %s" % t.strip(" <>").lower()
default = "Default: %s" % default.strip(" ()")
signode += sphinx.addnodes.desc_name(envvar, envvar)
signode += docutils.nodes.Text(' ')
signode += sphinx.addnodes.desc_type(t, t)
signode += docutils.nodes.Text(', ')
signode += sphinx.addnodes.desc_annotation(default, default)
return envvar
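# Sketch of the signature format parse_envvar() expects, i.e. "NAME <type>
# (default)"; the variable name below is a hypothetical example, not a
# documented environment variable.
_sig = "MESA_DEBUG_SKETCH <string> (silent)"
_envvar, _t, _default = _sig.split(" ", 2)
assert _envvar.strip().upper() == "MESA_DEBUG_SKETCH"
assert "Type: %s" % _t.strip(" <>").lower() == "Type: string"
assert "Default: %s" % _default.strip(" ()") == "Default: silent"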
def parse_opcode(env, sig, signode):
opcode, desc = sig.split("-", 1)
@ -17,33 +26,8 @@ def parse_opcode(env, sig, signode):
signode += sphinx.addnodes.desc_annotation(desc, desc)
return opcode
def ext_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
has_explicit_title, title, ext = split_explicit_title(text)
parts = ext.split('_', 2)
if parts[0] == 'VK':
full_url = f'https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/{ext}.html'
elif parts[0] == 'GL':
full_url = f'https://registry.khronos.org/OpenGL/extensions/{parts[1]}/{parts[1]}_{parts[2]}.txt'
else:
raise Exception(f'Unexpected API: {parts[0]}')
pnode = nodes.reference(title, title, internal=False, refuri=full_url)
return [pnode], []
def vkfeat_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
has_explicit_title, title, ext = split_explicit_title(text)
full_url = f'https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#features-{ext}'
pnode = nodes.reference(title, title, internal=False, refuri=full_url)
return [pnode], []
def setup(app):
app.add_object_type("envvar", "envvar", "%s (environment variable)",
parse_envvar)
app.add_object_type("opcode", "opcode", "%s (TGSI opcode)",
parse_opcode)
app.add_role('ext', ext_role)
app.add_role('vk-feat', vkfeat_role)

View file

@ -40,7 +40,7 @@ import nir_opcodes
OP_DESC_TEMPLATE = mako.template.Template("""
<%
def src_decl_list(num_srcs):
return ', '.join('nir_def *src' + str(i) for i in range(num_srcs))
return ', '.join('nir_ssa_def *src' + str(i) for i in range(num_srcs))
def to_yn(b):
return 'Y' if b else 'N'
@ -58,8 +58,6 @@ def to_yn(b):
- ${to_yn('associative' in op.algebraic_properties)}
- ${to_yn('2src_commutative' in op.algebraic_properties)}
${("**Description:** " + op.description) if op.description != "" else ""}
**Constant-folding:**
.. code-block:: c
@ -68,7 +66,7 @@ ${textwrap.indent(op.const_expr, ' ')}
**Builder function:**
.. c:function:: nir_def *nir_${op.name}(nir_builder *, ${src_decl_list(op.num_inputs)})
.. c:function:: nir_ssa_def *nir_${op.name}(nir_builder *, ${src_decl_list(op.num_inputs)})
""")
def parse_rst(state, parent, rst):

View file

@ -1,105 +0,0 @@
Name
MESA_sampler_objects
Name Strings
GL_MESA_sampler_objects
Contact
Adam Jackson <ajax@redhat.com>
Contributors
Emma Anholt
The contributors to ARB_sampler_objects and OpenGL ES 3
Status
Shipping
Version
Last Modified Date: 14 Sep 2021
Author Revision: 3
Number
TBD
Dependencies
OpenGL ES 2.0 is required.
This extension interacts with:
- EXT_shadow_samplers
- EXT_texture_filter_anisotropic
- EXT_texture_sRGB_decode
- OES_texture_border_clamp
Overview
This extension makes the sampler object subset of OpenGL ES 3.0 available
in OpenGL ES 2.0 contexts. As the intent is to allow access to the API
without necessarily requiring additional renderer functionality, some
sampler state that would be mandatory in GLES 3 is dependent on the
presence of additional extensions. Under GLES 3.0 or above this extension's
name string may be exposed for compatibility, but it is otherwise without
effect.
Refer to the OpenGL ES 3.0 specification for API details not covered here.
New Procedures and Functions
void glGenSamplers (GLsizei count, GLuint *samplers);
void glDeleteSamplers (GLsizei count, const GLuint *samplers);
GLboolean glIsSampler (GLuint sampler);
void glBindSampler (GLuint unit, GLuint sampler);
void glSamplerParameteri (GLuint sampler, GLenum pname, GLint param);
void glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param);
void glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param);
void glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param);
void glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params);
void glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params);
Note that these names are exactly as in ES3, with no MESA suffix.
New Tokens
SAMPLER_BINDING 0x8919
Interactions
If EXT_shadow_samplers is not supported then TEXTURE_COMPARE_MODE and
TEXTURE_COMPARE_FUNC will generate INVALID_ENUM.
If EXT_texture_filter_anisotropic is not supported then
TEXTURE_MAX_ANISOTROPY_EXT will generate INVALID_ENUM.
If EXT_texture_sRGB_decode is not supported then TEXTURE_SRGB_DECODE_EXT
will generate INVALID_ENUM.
If OES_texture_border_clamp is not supported then TEXTURE_BORDER_COLOR
will generate INVALID_ENUM.
Issues
1) Why bother?
Sampler objects, at least in Mesa, are generically supported without any
driver-dependent requirements, so enabling this is essentially free. This
simplifies application support for otherwise GLES2 hardware, and for
drivers in development that haven't yet achieved GLES3.
Revision History
Rev. Date Author Changes
---- -------- -------- ---------------------------------------------
1 2019/10/22 ajax Initial revision
2 2019/11/14 ajax Add extension interactions:
- EXT_shadow_samplers
- EXT_texture_filter_anisotropic
- EXT_texture_sRGB_decode
- OES_texture_border_clamp
3 2021/09/14 ajax Expand the justification and ES3 interaction

View file

@ -1,83 +0,0 @@
Name
MESA_texture_const_bandwidth
Name Strings
GL_MESA_texture_const_bandwidth
Contact
Rob Clark <robdclark@chromium.org>
Contributors
Rob Clark, Google
Lina Versace, Google
Tapani Pälli, Intel
Status
Proposal
Version
Version 1, September, 2023
Number
tbd
Dependencies
Requires EXT_memory_object.
Overview
The use of data dependent bandwidth compressed formats (UBWC, AFBC, etc)
can introduce a form of side-channel, in that the bandwidth used for
texture access is dependent on the texture's contents. In some cases
an application may want to disable the use of data dependent formats on
specific textures.
For that purpose, this extension extends EXT_memory_object to introduce
a new <param> CONST_BW_TILING_MESA.
IP Status
None
Issues
None
New Procedures and Functions
None
New Types
None
New Tokens
Returned in the <params> parameter of GetInternalFormativ or
GetInternalFormati64v when the <pname> parameter is TILING_TYPES_EXT,
returned in the <params> parameter of GetTexParameter{if}v,
GetTexParameterI{i ui}v, GetTextureParameter{if}v, and
GetTextureParameterI{i ui}v when the <pname> parameter is
TEXTURE_TILING_EXT, and accepted by the <params> parameter of
TexParameter{ifx}{v}, TexParameterI{i ui}v, TextureParameter{if}{v},
TextureParameterI{i ui}v when the <pname> parameter is
TEXTURE_TILING_EXT:
CONST_BW_TILING_MESA 0x8BBE
Errors
None
Revision History
Version 1, 2023-9-28 (Rob Clark)
Initial draft.

View file

@ -3,7 +3,7 @@ Amber Branch
After Mesa 21.3, all non-Gallium DRI drivers were removed from the Mesa
source-tree. These drivers are still being maintained to some degree,
but only on the ``amber`` branch, and only for critical fixes.
but only on the 21.3.x branch, and only for critical fixes.
These drivers include:
@ -39,8 +39,8 @@ enable that logic, you need to pass the ``-Damber=true`` flag to Meson.
Documentation
-------------
On `docs.mesa3d.org <https://docs.mesa3d.org/>`__, we currently only
On `docs.mesa3d.org <https://docs.mesa3d.org/>`, we currently only
publish the documentation from our main branch. But you can view the
documentation for the Amber branch `here
<https://gitlab.freedesktop.org/mesa/mesa/-/tree/amber/docs>`__.
<https://gitlab.freedesktop.org/mesa/mesa/-/tree/21.3/docs>`_.

View file

@ -16,9 +16,7 @@ Building using the Android NDK
Download and install the NDK using whatever method you normally would.
Then, create your Meson cross file to use it, something like this
``~/.local/share/meson/cross/android-aarch64`` file:
.. code-block:: ini
``~/.local/share/meson/cross/android-aarch64`` file::
[binaries]
ar = 'NDKDIR/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android-ar'
@ -43,15 +41,15 @@ one cross-compiling the turnip driver for a stock Pixel phone)
.. code-block:: console
meson setup build-android-aarch64 \
meson build-android-aarch64 \
--cross-file android-aarch64 \
-Dplatforms=android \
-Dplatform-sdk-version=26 \
-Dandroid-stub=true \
-Dgallium-drivers= \
-Dvulkan-drivers=freedreno \
-Dfreedreno-kmds=kgsl
meson compile -C build-android-aarch64
-Dfreedreno-kgsl=true
ninja -C build-android-aarch64
Replacing Android drivers on stock Android
------------------------------------------

View file

@ -6,7 +6,7 @@ This page documents known issues with some OpenGL applications.
Topogun
-------
`Topogun <https://www.topogun.com/>`__ for Linux (version 2, at least)
`Topogun <http://www.topogun.com/>`__ for Linux (version 2, at least)
creates a GLX visual without requesting a depth buffer. This causes bad
rendering if the OpenGL driver happens to choose a visual without a
depth buffer.

View file

@ -1,18 +1,13 @@
LAVA CI
=======
`LAVA <https://www.lavasoftware.org/>`__ is a system for functional
testing of boards including deploying custom bootloaders and kernels.
This is particularly relevant to testing Mesa because we often need
to change kernels for UAPI changes (and this lets us do full testing
of a new kernel during development), and our workloads can easily
take down boards when mistakes are made (kernel oopses, OOMs that
take out critical system services).
Available LAVA labs
-------------------
- Collabora `[dashboard] <https://lava.collabora.dev/scheduler/device_types>`__ (without authentication only health check jobs are displayed)
- Lima [dashboard not available]
`LAVA <https://lavasoftware.org/>`_ is a system for functional testing
of boards including deploying custom bootloaders and kernels. This is
particularly relevant to testing Mesa because we often need to change
kernels for UAPI changes (and this lets us do full testing of a new
kernel during development), and our workloads can easily take down
boards when mistakes are made (kernel oopses, OOMs that take out
critical system services).
Mesa-LAVA software architecture
-------------------------------
@ -66,16 +61,16 @@ the web interface, and create an API token. Copy that into a
.. code-block:: yaml
default:
token: <token contents>
uri: <URL to the instance>
username: gitlab-runner
default:
token: <token contents>
uri: <URL to the instance>
username: gitlab-runner
Add a volume mount of that ``lavacli.yaml`` to
``/etc/gitlab-runner/config.toml`` so that the Docker container can
access it. You probably have a ``volumes = ["/cache"]`` already, so now it would be::
volumes = ["/home/anholt/lava-config/lavacli.yaml:/root/.config/lavacli.yaml", "/cache"]
volumes = ["/home/anholt/lava-config/lavacli.yaml:/root/.config/lavacli.yaml", "/cache"]
Note that this token is visible to anybody that can submit MRs to
Mesa! It is not an actual secret. We could just bake it into the

View file

@ -34,7 +34,7 @@ initramfs) for trace replay testing. Given that we need networking already, and
our dEQP/Piglit/etc. payload is large, we use NFS from the x86 runner system
rather than initramfs.
See ``src/freedreno/ci/gitlab-ci.yml`` for an example of fastboot on DB410c and
See `src/freedreno/ci/gitlab-ci.yml` for an example of fastboot on DB410c and
DB820c (freedreno-a306 and freedreno-a530).
Requirements (Servo)
@ -56,25 +56,25 @@ done using dnsmasq on the runner host. For example, this snippet in
the dnsmasq.conf.d in the google farm, with the gitlab-runner host we
call "servo"::
dhcp-host=1c:69:7a:0d:a3:d3,10.42.0.10,set:servo
dhcp-host=1c:69:7a:0d:a3:d3,10.42.0.10,set:servo
# Fixed dhcp addresses for my sanity, and setting a tag for
# specializing other DHCP options
dhcp-host=a0:ce:c8:c8:d9:5d,10.42.0.11,set:cheza1
dhcp-host=a0:ce:c8:c8:d8:81,10.42.0.12,set:cheza2
# Fixed dhcp addresses for my sanity, and setting a tag for
# specializing other DHCP options
dhcp-host=a0:ce:c8:c8:d9:5d,10.42.0.11,set:cheza1
dhcp-host=a0:ce:c8:c8:d8:81,10.42.0.12,set:cheza2
# Specify the next server, watch out for the double ',,'. The
# filename didn't seem to get picked up by the bootloader, so we use
# tftp-unique-root and mount directories like
# /srv/tftp/10.42.0.11/jwerner/cheza as /tftp in the job containers.
tftp-unique-root
dhcp-boot=tag:cheza1,cheza1/vmlinuz,,10.42.0.10
dhcp-boot=tag:cheza2,cheza2/vmlinuz,,10.42.0.10
# Specify the next server, watch out for the double ',,'. The
# filename didn't seem to get picked up by the bootloader, so we use
# tftp-unique-root and mount directories like
# /srv/tftp/10.42.0.11/jwerner/cheza as /tftp in the job containers.
tftp-unique-root
dhcp-boot=tag:cheza1,cheza1/vmlinuz,,10.42.0.10
dhcp-boot=tag:cheza2,cheza2/vmlinuz,,10.42.0.10
dhcp-option=tag:cheza1,option:root-path,/srv/nfs/cheza1
dhcp-option=tag:cheza2,option:root-path,/srv/nfs/cheza2
dhcp-option=tag:cheza1,option:root-path,/srv/nfs/cheza1
dhcp-option=tag:cheza2,option:root-path,/srv/nfs/cheza2
See ``src/freedreno/ci/gitlab-ci.yml`` for an example of Servo on cheza. Note
See `src/freedreno/ci/gitlab-ci.yml` for an example of Servo on cheza. Note
that other Servo boards in CI are managed using LAVA.
Requirements (POE)
@ -98,38 +98,38 @@ You'll talk to the Cisco for configuration using its USB port, which provides a
serial terminal at 9600 baud. You need to enable SNMP control, which we'll do
using a "mesaci" community name that the gitlab runner can access as its
authentication (no password) to configure. To talk to the SNMP on the router,
you need to put an IP address on the default VLAN (VLAN 1).
you need to put an IP address on the default vlan (vlan 1).
Setting that up looks something like:
.. code-block: console
Switch>
Password:
Switch#configure terminal
Switch(config)#interface Vlan 1
Switch(config-if)#ip address 10.42.0.2 255.255.0.0
Switch(config-if)#end
Switch(config)#snmp-server community mesaci RW
Switch(config)#end
Switch#copy running-config startup-config
Switch>
Password:
Switch#configure terminal
Switch(config)#interface Vlan 1
Switch(config-if)#ip address 10.42.0.2 255.255.0.0
Switch(config-if)#end
Switch(config)#snmp-server community mesaci RW
Switch(config)#end
Switch#copy running-config startup-config
With that set up, you should be able to power on/off a port with something like:
.. code-block: console
% snmpset -v2c -r 3 -t 30 -cmesaci 10.42.0.2 1.3.6.1.4.1.9.9.402.1.2.1.1.1.1 i 1
% snmpset -v2c -r 3 -t 30 -cmesaci 10.42.0.2 1.3.6.1.4.1.9.9.402.1.2.1.1.1.1 i 4
% snmpset -v2c -r 3 -t 30 -cmesaci 10.42.0.2 1.3.6.1.4.1.9.9.402.1.2.1.1.1.1 i 1
% snmpset -v2c -r 3 -t 30 -cmesaci 10.42.0.2 1.3.6.1.4.1.9.9.402.1.2.1.1.1.1 i 4
Note that the "1.3.6..." SNMP OID changes between switches. The last digit
above is the interface id (port number). You can probably find the right OID by
googling; that was easier than figuring it out from the switch's MIB
database. You can query the POE status from the switch serial using the ``show
power inline`` command.
database. You can query the POE status from the switch serial using the `show
power inline` command.
Other than that, find the dnsmasq/tftp/NFS setup for your boards "servo" above.
See ``src/broadcom/ci/gitlab-ci.yml`` and ``src/nouveau/ci/gitlab-ci.yml`` for an
See `src/broadcom/ci/gitlab-ci.yml` and `src/nouveau/ci/gitlab-ci.yml` for an
example of POE for Raspberry Pi 3/4 and Jetson Nano.
Setup
@ -140,17 +140,17 @@ something like this to register a fastboot board:
.. code-block:: console
sudo gitlab-runner register \
--url https://gitlab.freedesktop.org \
--registration-token $1 \
--name MY_BOARD_NAME \
--tag-list MY_BOARD_TAG \
--executor docker \
--docker-image "alpine:latest" \
--docker-volumes "/dev:/dev" \
--docker-network-mode "host" \
--docker-privileged \
--non-interactive
sudo gitlab-runner register \
--url https://gitlab.freedesktop.org \
--registration-token $1 \
--name MY_BOARD_NAME \
--tag-list MY_BOARD_TAG \
--executor docker \
--docker-image "alpine:latest" \
--docker-volumes "/dev:/dev" \
--docker-network-mode "host" \
--docker-privileged \
--non-interactive
For a Servo board, you'll need to also volume mount the board's NFS
root dir at /nfs and TFTP kernel directory at /tftp.
@ -178,9 +178,9 @@ board's runner, set ``limit = 1`` ("only 1 job served by this board at a
time"). Finally, add the board-specific environment variables
required by your bare-metal script, something like::
[[runners]]
name = "google-freedreno-db410c-1"
environment = ["BM_SERIAL=/dev/ttyDB410c8", "BM_POWERUP=google-power-up.sh 8", "BM_FASTBOOT_SERIAL=15e9e390", "FDO_CI_CONCURRENT=4"]
[[runners]]
name = "google-freedreno-db410c-1"
environment = ["BM_SERIAL=/dev/ttyDB410c8", "BM_POWERUP=google-power-up.sh 8", "BM_FASTBOOT_SERIAL=15e9e390", "FDO_CI_CONCURRENT=4"]
The ``FDO_CI_CONCURRENT`` variable should be set to the number of CPU threads on
the board, which is used for auto-tuning of job parallelism.
@ -196,7 +196,7 @@ want a pass-through HTTP cache. On your runner box, install nginx:
.. code-block:: console
sudo apt install nginx libnginx-mod-http-lua
sudo apt install nginx libnginx-mod-http-lua
Add the server setup files:
@ -215,17 +215,16 @@ Enable the site and restart nginx:
.. code-block:: console
sudo rm /etc/nginx/sites-enabled/default
sudo ln -s /etc/nginx/sites-available/fdo-cache /etc/nginx/sites-enabled/fdo-cache
sudo systemctl restart nginx
sudo ln -s /etc/nginx/sites-available/fdo-cache /etc/nginx/sites-enabled/fdo-cache
sudo service nginx restart
# First download will hit the internet
wget http://localhost/cache/?uri=https://s3.freedesktop.org/mesa-tracie-public/itoral-gl-terrain-demo/demo-v2.trace
# Second download should be cached.
wget http://localhost/cache/?uri=https://s3.freedesktop.org/mesa-tracie-public/itoral-gl-terrain-demo/demo-v2.trace
# First download will hit the internet
wget http://localhost/cache/?uri=https://s3.freedesktop.org/mesa-tracie-public/itoral-gl-terrain-demo/demo.trace
# Second download should be cached.
wget http://localhost/cache/?uri=https://s3.freedesktop.org/mesa-tracie-public/itoral-gl-terrain-demo/demo.trace
Now, set ``download-url`` in your ``traces-*.yml`` entry to something like
``http://caching-proxy/cache/?uri=https://s3.freedesktop.org/mesa-tracie-public``
``http://10.42.0.1:8888/cache/?uri=https://s3.freedesktop.org/mesa-tracie-public``
and you should have cached downloads for traces. Add it to
``FDO_HTTP_CACHE_URI=`` in your ``config.toml`` runner environment lines and you
can use it for cached artifact downloads instead of going all the way to

View file

@ -3,7 +3,7 @@ Docker CI
For LLVMpipe and Softpipe CI, we run tests in a container containing
VK-GL-CTS, on the shared GitLab runners provided by `freedesktop
<https://www.freedesktop.org>`__
<http://freedesktop.org>`_
Software architecture
---------------------
@ -34,7 +34,7 @@ at the job's log for which specific tests failed).
DUT requirements
----------------
In addition to the general :ref:`CI-job-user-expectations`, using
In addition to the general :ref:`CI-farm-expectations`, using
Docker requires:
* DUTs must have a stable kernel and GPU reset (if applicable).
@ -53,7 +53,7 @@ step across multiple test runs. Since the images are large and change
approximately weekly, the DUTs also need to be running some script to
prune stale Docker images periodically in order to not run out of disk
space as we rev those containers (perhaps `this script
<https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2980#note_169233611>`__).
<https://gitlab.com/gitlab-org/gitlab-runner/issues/2980#note_169233611>`_).
Note that Docker doesn't allow containers to be stored on NFS, and
doesn't allow multiple Docker daemons to interact with the same

View file

@ -1,9 +1,9 @@
proxy_cache_path /var/cache/nginx/ levels=1:2 keys_zone=my_cache:10m max_size=24g inactive=48h use_temp_path=off;
server {
listen 10.42.0.1:80 default_server;
listen 127.0.0.1:80 default_server;
listen [::]:80 default_server;
listen 10.42.0.1:8888 default_server;
listen 127.0.0.1:8888 default_server;
listen [::]:8888 default_server;
resolver 8.8.8.8;
root /var/www/html;

View file

@ -13,18 +13,15 @@ modified and thus is unreliable).
The CI runs a number of tests, from trivial build-testing to complex GPU rendering:
- Build testing for a number of configurations and platforms
- Build testing for a number of build systems, configurations and platforms
- Sanity checks (``meson test``)
- Most drivers are also tested using several test suites, such as the
`Vulkan/GL/GLES conformance test suite <https://github.com/KhronosGroup/VK-GL-CTS>`__,
`Piglit <https://gitlab.freedesktop.org/mesa/piglit>`__, and others.
- Some drivers (Softpipe, LLVMpipe, Freedreno and Panfrost) are also tested
using `VK-GL-CTS <https://github.com/KhronosGroup/VK-GL-CTS>`__
- Replay of application traces
A typical run takes between 20 and 30 minutes, although it can go up very quickly
if the GitLab runners are overwhelmed, which happens sometimes. When it does happen,
not much can be done besides waiting it out, or cancelling it.
You can do your part by only running the jobs you care about by using `our
tool <#running-specific-ci-jobs>`__.
Due to limited resources, we currently do not run the CI automatically
on every push; instead, we only run it automatically once the MR has
@ -55,29 +52,6 @@ The three GitLab CI systems currently integrated are:
LAVA
docker
Farm management
---------------
.. note::
Never mix disabling/re-enabling a farm with any change that can affect a job
that runs in another farm!
When the farm starts failing for any reason (power, network, out-of-space), it needs to be disabled by pushing separate MR with
.. code-block:: console
git mv .ci-farms{,-disabled}/$farm_name
After farm restore functionality can be enabled by pushing a new merge request, which contains
.. code-block:: console
git mv .ci-farms{-disabled,}/$farm_name
.. warning::
Pushing (``git push``) directly to ``main`` is forbidden; this change must
be sent as a :ref:`Merge Request <merging>`.
Application traces replay
-------------------------
@ -85,7 +59,7 @@ The CI replays application traces with various drivers in two different jobs. Th
job replays traces listed in ``src/<driver>/ci/traces-<driver>.yml`` files and if any
of those traces fail the pipeline fails as well. The second job replays traces listed in
``src/<driver>/ci/restricted-traces-<driver>.yml`` and it is allowed to fail. This second
job is only created when the pipeline is triggered by ``marge-bot`` or any other user that
job is only created when the pipeline is triggered by `marge-bot` or any other user that
has been granted access to these traces.
A traces YAML file also includes a ``download-url`` pointing to a MinIO
@ -107,7 +81,7 @@ non-redistributable traces can request permission to Daniel Stone <daniels@colla
gitlab.freedesktop.org accounts that are to be granted access to these traces will be
added to the OPA policy for the MinIO repository as per
https://gitlab.freedesktop.org/freedesktop/helm-gitlab-infra/-/commit/a3cd632743019f68ac8a829267deb262d9670958 .
https://gitlab.freedesktop.org/freedesktop/helm-gitlab-config/-/commit/a3cd632743019f68ac8a829267deb262d9670958 .
So that the jobs are created in personal repositories, the name of the user's account needs
to be added to the rules attribute of the GitLab CI job that accesses the restricted
@ -148,10 +122,10 @@ If you're having issues with the Intel CI, your best bet is to ask about
it on ``#dri-devel`` on OFTC and tag `Nico Cortes
<https://gitlab.freedesktop.org/ngcortes>`__ (``ngcortes`` on IRC).
.. _CI-job-user-expectations:
.. _CI-farm-expectations:
CI job user expectations
------------------------
CI farm expectations
--------------------
To make sure that testing of one vendor's drivers doesn't block
unrelated work by other vendors, we require that a given driver's test
@ -160,23 +134,11 @@ driver had CI and failed once a week, we would be seeing someone's
code getting blocked on a spurious failure daily, which is an
unacceptable cost to the project.
To ensure that, driver maintainers with CI enabled should watch the Flakes panel
of the `CI flakes dashboard
<https://ci-stats-grafana.freedesktop.org/d/Ae_TLIwVk/mesa-ci-quality-false-positives?orgId=1>`__,
particularly the "Flake jobs" pane, to inspect jobs in their driver where the
automatic retry of a failing job produced a success a second time.
Additionally, most CI reports test-level flakes to an IRC channel, and flakes
reported as NEW are not expected and could cause spurious failures in jobs.
Please track the NEW reports in jobs and add them as appropriate to the
``-flakes.txt`` file for your driver.
Additionally, the test farm needs to be able to provide a short enough
turnaround time that we can get our MRs through marge-bot without the pipeline
backing up. As a result, we require that the test farm be able to handle a
whole pipeline's worth of jobs in less than 15 minutes (to compare, the build
stage is about 10 minutes). Given boot times and intermittent network delays,
this generally means that the test runtime as reported by deqp-runner should be
kept to 10 minutes.
turnaround time that we can get our MRs through marge-bot without the
pipeline backing up. As a result, we require that the test farm be
able to handle a whole pipeline's worth of jobs in less than 15 minutes
(to compare, the build stage is about 10 minutes).
If a test farm is short the HW to provide these guarantees, consider dropping
tests to reduce runtime. dEQP job logs print the slowest tests at the end of
@ -186,24 +148,20 @@ artifacts. Or, you can add the following to your job to only run some fraction
.. code-block:: yaml
variables:
variables:
DEQP_FRACTION: 10
to just run 1/10th of the test list.
For Collabora's LAVA farm, the `device types
<https://lava.collabora.dev/scheduler/device_types>`__ page can tell you how
many boards of a specific tag are currently available by adding the "Idle" and
"Busy" columns. For bare-metal, a gitlab admin can look at the `runners
<https://gitlab.freedesktop.org/admin/runners>`__ page. A pipeline should
probably not create more jobs for a board type than there are boards, unless you
clearly have some short-runtime jobs.
If a HW CI farm goes offline (network dies and all CI pipelines end up
stalled) or its runners are consistently spuriously failing (disk
full?), and the maintainer is not immediately available to fix the
issue, please push through an MR disabling that farm's jobs according
to the `Farm Management <#farm-management>`__ instructions.
issue, please push through an MR disabling that farm's jobs by adding
'.' to the front of the job names until the maintainer can bring
things back up. If this happens, the farm maintainer should provide a
report to mesa-dev@lists.freedesktop.org after the fact explaining
what happened and what the mitigation plan is for that failure next
time.
Personal runners
----------------
@ -215,18 +173,16 @@ faster personal machine as a runner. You can find the gitlab-runner
package in Debian, or use GitLab's own builds.
To do so, follow `GitLab's instructions
<https://docs.gitlab.com/ee/ci/runners/runners_scope.html#create-a-project-runner-with-a-runner-authentication-token>`__
to register your personal GitLab runner in your Mesa fork. Then, tell
<https://docs.gitlab.com/ce/ci/runners/#create-a-specific-runner>`__ to
register your personal GitLab runner in your Mesa fork. Then, tell
Mesa how many jobs it should serve (``concurrent=``) and how many
cores those jobs should use (``FDO_CI_CONCURRENT=``) by editing these
lines in ``/etc/gitlab-runner/config.toml``, for example:
lines in ``/etc/gitlab-runner/config.toml``, for example::
.. code-block:: toml
concurrent = 2
concurrent = 2
[[runners]]
environment = ["FDO_CI_CONCURRENT=16"]
[[runners]]
environment = ["FDO_CI_CONCURRENT=16"]
Docker caching
@ -235,7 +191,7 @@ Docker caching
The CI system uses Docker images extensively to cache
infrequently-updated build content like the CTS. The `freedesktop.org
CI templates
<https://gitlab.freedesktop.org/freedesktop/ci-templates/>`__ help us
<https://gitlab.freedesktop.org/freedesktop/ci-templates/>`_ help us
manage the building of the images to reduce how frequently rebuilds
happen, and trim down the images (stripping out manpages, cleaning the
apt cache, and other such common pitfalls of building Docker images).
@ -243,7 +199,7 @@ apt cache, and other such common pitfalls of building Docker images).
When running a container job, the templates will look for an existing
build of that image in the container registry under
``MESA_IMAGE_TAG``. If it's found it will be reused, and if
not, the associated ``.gitlab-ci/containers/<jobname>.sh`` will be run
not, the associated `.gitlab-ci/containers/<jobname>.sh`` will be run
to build it. So, when developing any change to container build
scripts, you need to update the associated ``MESA_IMAGE_TAG`` to
a new unique string. We recommend using the current date plus some
@ -255,7 +211,7 @@ When developing a given change to your Docker image, you would have to
bump the tag on each ``git commit --amend`` to your development
branch, which can get tedious. Instead, you can navigate to the
`container registry
<https://gitlab.freedesktop.org/mesa/mesa/container_registry>`__ for
<https://gitlab.freedesktop.org/mesa/mesa/container_registry>`_ for
your repository and delete the tag to force a rebuild. When your code
is eventually merged to main, a full image rebuild will occur again
(forks inherit images from the main repo, but MRs don't propagate
@ -269,7 +225,7 @@ don't personally have. If you're experiencing this with the CI
builds, you can use Docker to use their build environment locally. Go
to your job log, and at the top you'll see a line like::
Pulling docker image registry.freedesktop.org/anholt/mesa/debian/android_build:2020-09-11
Pulling docker image registry.freedesktop.org/anholt/mesa/debian/android_build:2020-09-11
We'll use a volume mount to make our current Mesa tree be what the
Docker container uses, so they'll share everything (their build will
@ -281,29 +237,17 @@ useful for debug). Extract your build setup variables from
.. code-block:: console
IMAGE=registry.freedesktop.org/anholt/mesa/debian/android_build:2020-09-11
sudo docker pull $IMAGE
sudo docker run --rm -v `pwd`:/mesa -w /mesa $IMAGE env PKG_CONFIG_PATH=/usr/local/lib/aarch64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android/pkgconfig/ GALLIUM_DRIVERS=freedreno UNWIND=disabled EXTRA_OPTION="-D android-stub=true -D llvm=disabled" DRI_LOADERS="-D glx=disabled -D gbm=disabled -D egl=enabled -D platforms=android" CROSS=aarch64-linux-android ./.gitlab-ci/meson-build.sh
IMAGE=registry.freedesktop.org/anholt/mesa/debian/android_build:2020-09-11
sudo docker pull $IMAGE
sudo docker run --rm -v `pwd`:/mesa -w /mesa $IMAGE env PKG_CONFIG_PATH=/usr/local/lib/aarch64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android/pkgconfig/ GALLIUM_DRIVERS=freedreno UNWIND=disabled EXTRA_OPTION="-D android-stub=true -D llvm=disabled" DRI_LOADERS="-D glx=disabled -D gbm=disabled -D egl=enabled -D platforms=android" CROSS=aarch64-linux-android ./.gitlab-ci/meson-build.sh
All you have left over from the build is its output, and a _build
directory. You can hack on mesa and iterate testing the build with:
.. code-block:: console
sudo docker run --rm -v `pwd`:/mesa $IMAGE meson compile -C /mesa/_build
sudo docker run --rm -v `pwd`:/mesa $IMAGE ninja -C /mesa/_build
Running specific CI jobs
------------------------
You can use ``bin/ci/ci_run_n_monitor.py`` to run specific CI jobs. It
will automatically take care of running all the jobs yours depends on,
and cancel the rest to avoid wasting resources.
See ``bin/ci/ci_run_n_monitor.py --help`` for all the options.
The ``--target`` argument takes a regex that you can use to select the
jobs names you want to run, eg. ``--target 'zink.*'`` will run all the
zink jobs, leaving the other drivers' jobs free for others to use.
Conformance Tests
-----------------
@ -327,15 +271,3 @@ instructions on how to uprev Linux Kernel in the GitLab CI ecosystem.
:maxdepth: 1
kernel
Reusing CI scripts for other projects
--------------------------------------
The CI scripts in ``.gitlab-ci/`` can be reused for other projects. To
facilitate reuse of the infrastructure, our scripts can be used as tools
to create containers and run tests on the available farms.
.. envvar:: EXTRA_LOCAL_PACKAGES
Define extra Debian packages to be installed in the container.

View file

@ -18,8 +18,8 @@ Linux mainline, that is why Mesa has its own kernel version which should be used
as the base for newer kernels.
So, one should base the kernel uprev from the last tag used in the Mesa CI,
please refer to ``.gitlab-ci/image-tags.yml`` ``KERNEL_TAG`` variable.
Every tag has a standard naming: ``vX.YZ-for-mesa-ci-<commit_short_SHA>``, which
please refer to `.gitlab-ci/container/gitlab-ci.yml` `KERNEL_URL` variable.
Every tag has a standard naming: `vX.YZ-for-mesa-ci-<commit_short_SHA>`, which
can be created via the command:
:code:`git tag vX.YZ-for-mesa-ci-$(git rev-parse --short HEAD)`
@ -27,7 +27,8 @@ can be created via the command:
Building Kernel
---------------
The kernel files are loaded from the artifacts uploaded to S3 from gfx-ci/linux.
When Mesa CI generates a new rootfs image, the Linux Kernel is built based on
the script located at `.gitlab-ci/container/build-kernel.sh`.
Updating Kconfigs
^^^^^^^^^^^^^^^^^
@ -35,20 +36,20 @@ Updating Kconfigs
When a Kernel uprev happens, it is worth compiling and cross-compiling the
Kernel locally, in order to update the Kconfigs accordingly. Remember that the
resulting Kconfig is a merge between *Mesa CI Kconfig* and *Linux tree
defconfig* made via ``merge_config.sh`` script located at Linux Kernel tree.
defconfig* made via `merge_config.sh` script located at Linux Kernel tree.
Kconfigs location
"""""""""""""""""
+------------+------------------------------------------------------+-------------------------------------+
| Platform | Mesa CI Kconfig location | Linux tree defconfig |
+============+======================================================+=====================================+
| arm | kernel/configs/mesa3d-ci_arm.config\@gfx-ci/linux | arch/arm/configs/multi_v7_defconfig |
+------------+------------------------------------------------------+-------------------------------------+
| arm64 | kernel/configs/mesa3d-ci_arm64.config\@gfx-ci/linux | arch/arm64/configs/defconfig |
+------------+------------------------------------------------------+-------------------------------------+
| x86-64 | kernel/configs/mesa3d-ci_x86_64.config\@gfx-ci/linux | arch/x86/configs/x86_64_defconfig |
+------------+------------------------------------------------------+-------------------------------------+
+------------+--------------------------------------------+-------------------------------------+
| Platform | Mesa CI Kconfig location | Linux tree defconfig |
+============+============================================+=====================================+
| arm | .gitlab-ci/container/arm.config | arch/arm/configs/multi_v7_defconfig |
+------------+--------------------------------------------+-------------------------------------+
| arm64 | .gitlab-ci/container/arm64.config | arch/arm64/configs/defconfig |
+------------+--------------------------------------------+-------------------------------------+
| x86-64 | .gitlab-ci/container/x86_64.config | arch/x86/configs/x86_64_defconfig |
+------------+--------------------------------------------+-------------------------------------+
Updating image tags
-------------------
@ -69,9 +70,9 @@ Development routine
1. Compile the newer kernel locally for each platform.
2. Compile device trees for ARM platforms
3. Update Kconfigs. Are new Kconfigs necessary? Is CONFIG_XYZ_BLA deprecated? Does the ``merge_config.sh`` override an important config?
3. Update Kconfigs. Are new Kconfigs necessary? Is CONFIG_XYZ_BLA deprecated? Does the `merge_config.sh` override an important config?
4. Push a new development branch to `Kernel repository`_ based on the latest kernel tag used in GitLab CI
5. Hack ``build-kernel.sh`` script to clone kernel from your development branch
5. Hack `build-kernel.sh` script to clone kernel from your development branch
6. Update image tags. See `Updating image tags`_
7. Run the entire CI pipeline, all the automatic jobs should be green. If some job is red or taking too long, you will need to investigate it and probably ask for help.
@ -79,7 +80,7 @@ When the Kernel uprev is stable
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. Push a new tag to Mesa CI `Kernel repository`_
2. Update KERNEL_URL ``debian/x86_test-gl`` job definition
2. Update KERNEL_URL `debian/x86_test-gl` job definition
3. Open a merge request, if it is not opened yet
Tips and Tricks
@ -106,15 +107,15 @@ Bare-metal custom kernels
Some CI jobs have support to plug in a custom kernel by simply changing a variable.
This is great, since rebuilding the kernel and rootfs may take dozens of minutes.
For example, Freedreno jobs ``gitlab.yml`` manifest support a variable named
``BM_KERNEL``. If one puts a gz-compressed kernel URL there, the job will use that
kernel to boot the Freedreno bare-metal devices. The same works for ``BM_DTB`` in
For example, Freedreno jobs `gitlab.yml` manifest support a variable named
`BM_KERNEL`. If one puts a gz-compressed kernel URL there, the job will use that
kernel to boot the Freedreno bare-metal devices. The same works for `BM_DTB` in
the case of device tree binaries.
Careful reading of the job logs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes a job may turn red for reasons unrelated to the kernel update, e.g.
LAVA ``tftp`` timeout, problems with the freedesktop servers etc.
LAVA `tftp` timeout, problems with the freedesktop servers etc.
So it is important to see the reason why the job turned red, and retry it if an
infrastructure error has happened.

View file

@ -3,10 +3,10 @@ Running traces on a local machine
Prerequisites
-------------
- Install `Apitrace <https://apitrace.github.io/>`__
- Install `Renderdoc <https://renderdoc.org/>`__ (only needed for some traces)
- Download and compile `Piglit <https://gitlab.freedesktop.org/mesa/piglit>`__ and install its `dependencies <https://gitlab.freedesktop.org/mesa/piglit#2-setup>`__
- Download traces you want to replay from `traces-db <https://gitlab.freedesktop.org/gfx-ci/tracie/traces-db/>`__
- Install `Apitrace <https://apitrace.github.io/>`_
- Install `Renderdoc <https://renderdoc.org/>`_ (only needed for some traces)
- Download and compile `Piglit <https://gitlab.freedesktop.org/mesa/piglit>`_ and install its `dependencies <https://gitlab.freedesktop.org/mesa/piglit#2-setup>`_
- Download traces you want to replay from `traces-db <https://gitlab.freedesktop.org/gfx-ci/tracie/traces-db/>`_
Running single trace
--------------------
@ -14,17 +14,17 @@ A simple run to see the output of the trace can be done with
.. code-block:: console
apitrace replay -w name_of_trace.trace
apitrace replay -w name_of_trace.trace
For more information, look into the `Apitrace documentation <https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown>`__.
For more information, look into the `Apitrace documentation <https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown>`_.
For comparing checksums use:
.. code-block:: console
cd piglit/replayer
export PIGLIT_SOURCE_DIR="../"
./replayer.py compare trace -d test path/name_of_trace.trace 0 # replace with expected checksum
cd piglit/replayer
export PIGLIT_SOURCE_DIR="../"
./replayer.py compare trace -d test path/name_of_trace.trace 0 # replace with expected checksum
Simulating CI trace job
@ -32,14 +32,14 @@ Simulating CI trace job
Sometimes it's useful to be able to test traces on your local machine instead of the Mesa CI runner, simulating the CI environment as closely as possible.
Download the YAML file from your driver's ``ci/`` directory and then change the path in the YAML file from local proxy or MinIO to the local directory (url-like format ``file://``)
Download the YAML file from your driver's `ci/` directory and then change the path in the YAML file from local proxy or MinIO to the local directory (url-like format ``file://``)
.. code-block:: console
# The PIGLIT_REPLAY_DEVICE_NAME has to match name in the YAML file.
export PIGLIT_REPLAY_DEVICE_NAME='your_device_name'
export PIGLIT_REPLAY_DESCRIPTION_FILE='path_to_mesa_traces_file.yml'
./piglit run -l verbose --timeout 300 -j10 replay ~/results/
# The PIGLIT_REPLAY_DEVICE_NAME has to match name in the YAML file.
export PIGLIT_REPLAY_DEVICE_NAME='your_device_name'
export PIGLIT_REPLAY_DESCRIPTION_FILE='path_to_mesa_traces_file.yml'
./piglit run -l verbose --timeout 300 -j10 replay ~/results/
Note: For replaying traces, you may need to allow higher GL and GLSL versions. You can achieve that by setting ``MESA_GLSL_VERSION_OVERRIDE`` and ``MESA_GL_VERSION_OVERRIDE``.

View file

@ -1,33 +1,101 @@
SkQP
====
`SkQP <https://skia.org/docs/dev/testing/skqp/>`__ stands for SKIA Quality
`SkQP <https://skia.org/docs/dev/testing/skqp/>`_ stands for SKIA Quality
Program conformance tests. Basically, it has sets of rendering tests and unit
tests to ensure that `SKIA <https://skia.org/>`__ is meeting its design specifications on a specific
tests to ensure that `SKIA <https://skia.org/>`_ is meeting its design specifications on a specific
device.
The rendering tests have support for GL, GLES and Vulkan backends and test some
rendering scenarios.
And the unit tests check the GPU behavior without rendering images, using any of the GL/GLES or Vulkan drivers.
And the unit tests check the GPU behavior without rendering images.
Tests
-----
Render tests design
^^^^^^^^^^^^^^^^^^^
It is worth noting that `rendertests.txt` can carry some detail about each test
expectation: each test can have a max pixel error count, telling SkQP that it
is OK to have at most that number of errors for that test. See also:
https://github.com/google/skia/blob/c29454d1c9ebed4758a54a69798869fa2e7a36e0/tools/skqp/README_ALGORITHM.md
.. _test-location:
Location
^^^^^^^^
Each `rendertests.txt` and `unittest.txt` file must be located inside a specific
subdirectory inside SkQP assets directory.
+--------------+--------------------------------------------+
| Test type | Location |
+==============+============================================+
| Render tests | `${SKQP_ASSETS_DIR}/skqp/rendertests.txt` |
+--------------+--------------------------------------------+
| Unit tests | `${SKQP_ASSETS_DIR}/skqp/unittests.txt` |
+--------------+--------------------------------------------+
The `skqp-runner.sh` script will make the necessary modifications to separate
`rendertests.txt` for each backend-driver combination, as long as the test files
are located in the expected place:
+--------------+----------------------------------------------------------------------------------------------+
| Test type    | Location                                                                                     |
+==============+==============================================================================================+
| Render tests | `${MESA_REPOSITORY_DIR}/src/${GPU_DRIVER}/ci/${GPU_VERSION}-${SKQP_BACKEND}_rendertests.txt` |
+--------------+----------------------------------------------------------------------------------------------+
| Unit tests   | `${MESA_REPOSITORY_DIR}/src/${GPU_DRIVER}/ci/${GPU_VERSION}_unittests.txt`                   |
+--------------+----------------------------------------------------------------------------------------------+
Where `SKQP_BACKEND` can be:
- gl: for GL backend
- gles: for GLES backend
- vk: for Vulkan backend
Example file
""""""""""""
.. code-block:: console
src/freedreno/ci/freedreno-a630-skqp-gl_rendertests.txt
- GPU_DRIVER: `freedreno`
- GPU_VERSION: `freedreno-a630`
- SKQP_BACKEND: `gl`
.. _rendertests-design:
SkQP reports
------------
SkQP generates reports after finishing its execution, and deqp-runner collects
them in the job artifacts results directory under the test name. Click the
'Browse' button from a failing job to get to them.
SkQP generates reports after finishing its execution. They are located in the job
artifacts results directory and are divided into subdirectories by rendering test
backend and unit tests. The job log has links to every generated report to
facilitate SkQP debugging.
SkQP failing tests
------------------
Maintaining SkQP on Mesa CI
---------------------------
SkQP rendering tests will have a range of pixel values allowed for the driver's
rendering for a given test. This can make the "expected" image in the result
output look rather strange, but you should be able to make sense of it knowing
that.
SkQP is built alongside another binary, namely `list_gpu_unit_tests`, which is
located in the same folder as the `skqp` binary.
In SkQP itself, testcases can have increased failing pixel thresholds added to
them to keep CI green when the rendering is "correct" but out of normal range.
However, we don't support changing the thresholds in our testing. Because any
driver rendering not meeting the normal thresholds will trigger Android CTS
failures, we treat them as failures and track them as expected failures in the
``*-fails.txt`` file.
This binary will generate the expected `unittests.txt` for the target GPU, so
ideally it should be executed on every SkQP update and when a new device
receives SkQP CI jobs.
1. Generate target unit tests for the current GPU with :code:`./list_gpu_unit_tests > unittests.txt`
2. Run SkQP job
3. If there is a failing or crashing unit test, remove it from the corresponding `unittests.txt`
4. If there is a crashing render test, remove it from the corresponding `rendertests.txt`
5. If there is a failing render test, visually inspect the result from the HTML report
- If the render result is OK, update the max error count for that test
- Otherwise, put `-1` as the threshold, as seen in :ref:`rendertests-design`
6. Remember to put the new test files in the locations cited in :ref:`test-location` (see the sketch below)
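A rough sketch of steps 1 and 6 for a hypothetical Freedreno a630 device (driver name, GPU version and paths are illustrative; adapt them to the table in :ref:`test-location`):

.. code-block:: console

# 1. On the target device, generate the expected unit test list
./list_gpu_unit_tests > unittests.txt

# 6. Put it where the CI scripts expect it
cp unittests.txt $MESA_REPOSITORY_DIR/src/freedreno/ci/freedreno-a630_unittests.txt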

View file

@ -8,103 +8,7 @@ of mesa can use different coding style as set in the local EditorConfig
following is applicable. If the guidelines below don't cover something,
try following the format of existing, neighboring code.
``clang-format``
----------------
A growing number of drivers and components are adopting ``clang-format``
to standardize the formatting and make it easy for everyone to apply it.
You can re-format the code for the components that have opted-in to the
formatting enforcement (listed in ``.clang-format-include``) by simply
running ``ninja -C build/ clang-format``.
Since mass-reformatting commits can be an annoying extra jump to go
through when looking at ``git blame``, you can configure it to ignore
them by running::
git config blame.ignoreRevsFile .git-blame-ignore-revs
Most code editors also support automatically formatting code as you
write it; check your editor or its plugins to see how to enable this.
Vim
***
Add this to your ``.vimrc`` to automatically format any C & C++ file
(that has a .clang-format config) when you save it:
.. code:: vim
augroup ClangFormatOnSave
au!
function! ClangFormatOnSave()
" Only format files that have a .clang-format in a parent folder
if !empty(findfile('.clang-format', '.;'))
let l:formatdiff = 1 " Only format lines that have changed
py3f /usr/share/clang/clang-format.py
endif
endfunction
autocmd BufWritePre *.h,*.c,*.cc,*.cpp call ClangFormatOnSave()
augroup END
If ``/usr/share/clang/clang-format.py`` doesn't exist, try
``/usr/share/clang/clang-format-$CLANG_VERSION/clang-format.py``
(replacing ``$CLANG_VERSION`` with your clang version). If your distro
has put the file somewhere else, look through the files in the package
providing ``clang-format``.
Emacs
*****
Add this to your ``.emacs`` to automatically format any C & C++ file
(that has a .clang-format config) when you save it:
.. code:: emacs
(load "/usr/share/clang/clang-format.el")
(defun clang-format-save-hook-for-this-buffer ()
"Create a buffer local save hook."
(add-hook 'before-save-hook
(lambda ()
(when (locate-dominating-file "." ".clang-format")
(clang-format-buffer))
;; Continue to save.
nil)
nil
;; Buffer local hook.
t))
;; Run this for each mode you want to use the hook.
(add-hook 'c-mode-hook (lambda () (clang-format-save-hook-for-this-buffer)))
(add-hook 'c++-mode-hook (lambda () (clang-format-save-hook-for-this-buffer)))
If ``/usr/share/clang/clang-format.el`` doesn't exist, look through the
files in the package providing ``clang-format`` in your distro. If you
can't find anything (e.g. on Debian/Ubuntu), refer to `this StackOverflow
answer <https://stackoverflow.com/questions/59690583/how-do-you-use-clang-format-on-emacs-ubuntu/59850773#59850773>`__
to install clang-format through Emacs instead.
git ``pre-commit`` hook
***********************
If your editor doesn't support this, or if you don't want to enable it, you
can always just run ``ninja clang-format`` to format everything, or add
a ``pre-commit`` hook that runs this automatically whenever you ``git
commit`` by adding the following in your ``.git/hooks/pre-commit``:
.. code:: sh
#!/usr/bin/env bash
# Replace "origin" below with the name of the remote tracking upstream Mesa
# (if you don't know, it's probably `origin`).
upstream=origin
shopt -s globstar
git clang-format $upstream -- $(grep -E '^[^#]' .clang-format-include)
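Keep in mind that Git only runs hooks that are executable; assuming you saved the snippet above as ``.git/hooks/pre-commit``, one extra step is needed:

.. code:: sh

# make the hook executable so Git actually runs it on `git commit`
chmod +x .git/hooks/pre-commit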
Basic formatting guidelines
---------------------------
- 3-space indentation, no tabs.
- Limit lines to 78 or fewer characters. The idea is to prevent line
@ -132,7 +36,7 @@ Basic formatting guidelines
- Use comments wherever you think it would be helpful for other
developers. Several specific cases and style examples follow. Note
that we roughly follow `Doxygen <https://www.doxygen.nl>`__
that we roughly follow `Doxygen <http://www.doxygen.nl>`__
conventions.
Single-line comments:

View file

@ -1,6 +1,8 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sphinx_rtd_theme
#
# The Mesa 3D Graphics Library documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 29 14:08:51 2017.
@ -21,8 +23,6 @@
import os
import sys
from hawkmoth.util import compiler
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
@ -38,14 +38,7 @@ sys.path.append(os.path.abspath('_exts'))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'bootstrap',
'formatting',
'hawkmoth',
'nir',
'redirects',
'sphinx.ext.graphviz',
]
extensions = ['sphinx.ext.graphviz', 'breathe', 'formatting', 'nir', 'redirects']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@ -65,7 +58,7 @@ copyright = '1995-2018, Brian Paul'
author = 'Brian Paul'
html_show_copyright = False
html_theme_path = ['.']
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@ -86,33 +79,54 @@ language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['header-stubs']
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Disable highlighting unless a language is specified, otherwise we'll get
# python keywords highlighted in literal blocks.
highlight_language = 'none'
highlight_language = "none"
default_role = 'c:expr'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'mesa3d_theme'
html_theme = 'sphinx_rtd_theme'
html_favicon = 'favicon.ico'
html_favicon = "favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'display_version': False,
}
html_context = {
'display_gitlab': True,
'gitlab_host': 'gitlab.freedesktop.org',
'gitlab_user': 'mesa',
'gitlab_repo': 'mesa',
'gitlab_version': 'main',
'conf_py_path': '/docs/',
}
html_copy_source = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [
'_static/',
html_static_path = []
html_extra_path = [
'_extra/',
'release-maintainers-keys.asc',
'features.txt',
'libGL.txt',
@ -120,42 +134,11 @@ html_static_path = [
'README.VCE',
]
html_extra_path = []
html_redirects = [
('webmaster', 'https://www.mesa3d.org/website/'),
('developers', 'https://www.mesa3d.org/developers/'),
('thanks', 'https://gitlab.freedesktop.org/mesa/mesa/-/blob/amber/docs/thanks.rst'),
]
# -- Options for linkcheck ------------------------------------------------
linkcheck_ignore = [
r'specs/.*\.spec', # gets copied during the build process
r'news:.*', # seems linkcheck doesn't like the news: URI-scheme...
r'http://mesa-ci-results.jf.intel.com', # only available for Intel employees
r'https://gitlab.com/.*#.*', # needs JS eval
r'https://gitlab.freedesktop.org/.*#.*', # needs JS eval
r'https://github.com/.*#.*', # needs JS eval
]
linkcheck_exclude_documents = [r'relnotes/.*']
linkcheck_allowed_redirects = {
# Pages that forward the front-page to a wiki or some explore-page
'https://www.freedesktop.org': 'https://www.freedesktop.org/wiki/',
'https://x.org': 'https://x.org/wiki/',
'https://perf.wiki.kernel.org/': 'https://perf.wiki.kernel.org/index.php/Main_Page',
'https://dri.freedesktop.org/': 'https://dri.freedesktop.org/wiki/',
'https://gitlab.freedesktop.org/': 'https://gitlab.freedesktop.org/explore/groups',
'https://www.sphinx-doc.org/': 'https://www.sphinx-doc.org/en/master/',
# Pages that requires authentication
'https://gitlab.freedesktop.org/admin/runners': 'https://gitlab.freedesktop.org/users/sign_in',
'https://gitlab.freedesktop.org/profile/personal_access_tokens': 'https://gitlab.freedesktop.org/users/sign_in',
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
@ -216,25 +199,10 @@ texinfo_documents = [
graphviz_output_format = 'svg'
# -- Options for hawkmoth -------------------------------------------------
hawkmoth_root = os.path.abspath('..')
hawkmoth_clang = [
'-Idocs/header-stubs/',
'-Iinclude/',
'-Isrc/',
'-Isrc/gallium/include/',
'-Isrc/intel/',
'-Isrc/mesa/',
'-DHAVE_STRUCT_TIMESPEC',
'-DHAVE_PTHREAD',
'-DHAVE_ENDIAN_H',
]
hawkmoth_clang.extend(compiler.get_include_args())
# helpers for defining parameter direction
rst_prolog = '''
.. |in| replace:: **[in]**
.. |out| replace:: **[out]**
.. |inout| replace:: **[inout]**
'''
# -- Options for breathe --------------------------------------------------
breathe_projects = {
'mesa' : 'doxygen_xml',
}
breathe_default_project = 'mesa'
breathe_show_define_initializer = True
breathe_show_enumvalue_initializer = True

View file

@ -14,4 +14,4 @@ In your debugger you can set a breakpoint in ``_mesa_error()`` to trap
Mesa errors.
There is a display list printing/debugging facility. See the end of
``src/mesa/main/dlist.c`` for details.
``src/dlist.c`` for details.
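A minimal GDB session for trapping those errors might look like this (``glxgears`` is only a placeholder for the application you are debugging):

.. code-block:: console

$ gdb --args glxgears
(gdb) break _mesa_error
(gdb) run

Once the breakpoint hits, ``backtrace`` shows which API call triggered the error.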

Some files were not shown because too many files have changed in this diff.