Diffstat
-rw-r--r--  .gitlab-ci.yml | 37
-rw-r--r--  Dockerfile.debian | 20
-rw-r--r--  Dockerfile.debian-minimal | 24
-rw-r--r--  Dockerfile.fedora | 52
-rw-r--r--  METADATA | 13
-rw-r--r--  NEWS | 38
-rw-r--r--  benchmarks/gem_syslatency.c | 1
-rw-r--r--  benchmarks/gem_wsim.c | 1546
-rw-r--r--  benchmarks/wsim/README | 139
-rw-r--r--  benchmarks/wsim/frame-split-60fps.wsim | 18
-rw-r--r--  benchmarks/wsim/high-composited-game.wsim | 11
-rw-r--r--  benchmarks/wsim/media-1080p-player.wsim | 5
-rw-r--r--  benchmarks/wsim/medium-composited-game.wsim | 9
-rw-r--r--  configure.ac | 16
-rw-r--r--  docs/chamelium.txt | 22
-rw-r--r--  docs/reference/igt-gpu-tools/meson.build | 41
-rw-r--r--  include/drm-uapi/i915_drm.h | 209
-rw-r--r--  include/drm-uapi/panfrost_drm.h | 142
-rw-r--r--  lib/Makefile.sources | 6
-rw-r--r--  lib/drmtest.c | 12
-rw-r--r--  lib/drmtest.h | 1
-rw-r--r--  lib/i915/gem_context.c | 72
-rw-r--r--  lib/i915/gem_context.h | 13
-rw-r--r--  lib/i915/gem_engine_topology.c | 291
-rw-r--r--  lib/i915/gem_engine_topology.h | 70
-rw-r--r--  lib/i915/gem_vm.c | 130
-rw-r--r--  lib/i915/gem_vm.h | 38
-rw-r--r--  lib/igt.h | 5
-rw-r--r--  lib/igt_alsa.c | 233
-rw-r--r--  lib/igt_alsa.h | 20
-rw-r--r--  lib/igt_audio.c | 226
-rw-r--r--  lib/igt_audio.h | 10
-rw-r--r--  lib/igt_aux.c | 13
-rw-r--r--  lib/igt_aux.h | 11
-rw-r--r--  lib/igt_chamelium.c | 134
-rw-r--r--  lib/igt_chamelium.h | 24
-rw-r--r--  lib/igt_core.c | 131
-rw-r--r--  lib/igt_core.h | 64
-rw-r--r--  lib/igt_debugfs.c | 4
-rw-r--r--  lib/igt_debugfs.h | 3
-rw-r--r--  lib/igt_device.c | 17
-rw-r--r--  lib/igt_dummyload.c | 52
-rw-r--r--  lib/igt_dummyload.h | 2
-rw-r--r--  lib/igt_edid.c | 127
-rw-r--r--  lib/igt_edid.h | 139
-rw-r--r--  lib/igt_eld.c | 258
-rw-r--r--  lib/igt_eld.h | 55
-rw-r--r--  lib/igt_fb.c | 144
-rw-r--r--  lib/igt_gt.c | 73
-rw-r--r--  lib/igt_gt.h | 27
-rw-r--r--  lib/igt_kms.c | 187
-rw-r--r--  lib/igt_kms.h | 13
-rw-r--r--  lib/igt_panfrost.c | 277
-rw-r--r--  lib/igt_panfrost.h | 60
-rw-r--r--  lib/igt_syncobj.c | 2
-rw-r--r--  lib/igt_vc4.c | 278
-rw-r--r--  lib/igt_vc4.h | 12
-rw-r--r--  lib/intel_device_info.c | 2
-rw-r--r--  lib/meson.build | 10
-rw-r--r--  lib/panfrost-job.h | 1516
-rw-r--r--  lib/panfrost-misc.h | 47
-rw-r--r--  lib/tests/igt_audio.c | 222
-rw-r--r--  lib/tests/igt_conflicting_args.c | 99
-rw-r--r--  lib/tests/igt_edid.c | 97
-rw-r--r--  lib/tests/igt_hdmi_inject.c | 1
-rw-r--r--  lib/tests/igt_subtest_group.c | 9
-rw-r--r--  lib/tests/meson.build | 9
-rw-r--r--  man/meson.build | 14
-rw-r--r--  meson.build | 147
-rw-r--r--  meson_options.txt | 32
-rw-r--r--  overlay/meson.build | 17
-rw-r--r--  runner/job_list.c | 38
-rw-r--r--  runner/job_list.h | 1
-rw-r--r--  runner/meson.build | 16
-rw-r--r--  runner/resultgen.c | 21
-rw-r--r--  runner/runner.c | 5
-rw-r--r--  runner/runner_tests.c | 628
-rw-r--r--  runner/settings.c | 147
-rw-r--r--  runner/settings.h | 5
-rw-r--r--  runner/testdata/meson.build | 5
-rw-r--r--  runner/testdata/test-blacklist.txt | 2
-rw-r--r--  runner/testdata/test-blacklist2.txt | 2
-rwxr-xr-x  scripts/media-bench.pl | 9
-rwxr-xr-x  scripts/trace.pl | 321
-rw-r--r--  tests/Makefile.am | 1
-rw-r--r--  tests/Makefile.sources | 17
-rw-r--r--  tests/amdgpu/amd_color.c | 405
-rw-r--r--  tests/amdgpu/meson.build | 1
-rw-r--r--  tests/core_auth.c | 120
-rw-r--r--  tests/drm_import_export.c | 2
-rw-r--r--  tests/i915/gem_busy.c | 143
-rw-r--r--  tests/i915/gem_close_race.c | 1
-rw-r--r--  tests/i915/gem_cs_tlb.c | 8
-rw-r--r--  tests/i915/gem_ctx_clone.c | 462
-rw-r--r--  tests/i915/gem_ctx_create.c | 234
-rw-r--r--  tests/i915/gem_ctx_engines.c | 522
-rw-r--r--  tests/i915/gem_ctx_exec.c | 16
-rw-r--r--  tests/i915/gem_ctx_isolation.c | 21
-rw-r--r--  tests/i915/gem_ctx_param.c | 116
-rw-r--r--  tests/i915/gem_ctx_shared.c | 862
-rw-r--r--  tests/i915/gem_ctx_switch.c | 75
-rw-r--r--  tests/i915/gem_exec_balancer.c | 1332
-rw-r--r--  tests/i915/gem_exec_basic.c | 61
-rw-r--r--  tests/i915/gem_exec_blt.c | 6
-rw-r--r--  tests/i915/gem_exec_fence.c | 77
-rw-r--r--  tests/i915/gem_exec_parallel.c | 26
-rw-r--r--  tests/i915/gem_exec_schedule.c | 25
-rw-r--r--  tests/i915/gem_exec_store.c | 36
-rw-r--r--  tests/i915/gem_exec_whisper.c | 69
-rw-r--r--  tests/i915/gem_gtt_speed.c | 31
-rw-r--r--  tests/i915/gem_hang.c | 25
-rw-r--r--  tests/i915/gem_linear_blits.c | 6
-rw-r--r--  tests/i915/gem_mmap.c | 9
-rw-r--r--  tests/i915/gem_mmap_gtt.c | 10
-rw-r--r--  tests/i915/gem_mmap_wc.c | 9
-rw-r--r--  tests/i915/gem_ppgtt.c | 49
-rw-r--r--  tests/i915/gem_pread.c | 25
-rw-r--r--  tests/i915/gem_pwrite.c | 25
-rw-r--r--  tests/i915/gem_pwrite_pread.c | 24
-rw-r--r--  tests/i915/gem_render_copy.c | 25
-rw-r--r--  tests/i915/gem_render_copy_redux.c | 6
-rw-r--r--  tests/i915/gem_request_retire.c | 6
-rw-r--r--  tests/i915/gem_softpin.c | 2
-rw-r--r--  tests/i915/gem_spin_batch.c | 4
-rw-r--r--  tests/i915/gem_stress.c | 262
-rw-r--r--  tests/i915/gem_tiled_blits.c | 6
-rw-r--r--  tests/i915/gem_userptr_blits.c | 22
-rw-r--r--  tests/i915/gem_vm_create.c | 412
-rw-r--r--  tests/i915/gem_wait.c | 24
-rw-r--r--  tests/i915/gem_workarounds.c | 63
-rw-r--r--  tests/i915/gen3_mixed_blits.c | 28
-rw-r--r--  tests/i915/gen3_render_linear_blits.c | 28
-rw-r--r--  tests/i915/gen3_render_mixed_blits.c | 28
-rw-r--r--  tests/i915/gen3_render_tiledx_blits.c | 28
-rw-r--r--  tests/i915/gen3_render_tiledy_blits.c | 28
-rw-r--r--  tests/i915/i915_hangman.c | 17
-rw-r--r--  tests/i915/i915_pm_rpm.c | 209
-rw-r--r--  tests/i915/i915_query.c | 247
-rw-r--r--  tests/intel-ci/blacklist.txt | 1
-rw-r--r--  tests/intel-ci/fast-feedback.testlist | 31
-rw-r--r--  tests/kms_available_modes_crc.c | 1
-rw-r--r--  tests/kms_big_fb.c | 713
-rw-r--r--  tests/kms_chamelium.c | 1202
-rw-r--r--  tests/kms_concurrent.c | 21
-rw-r--r--  tests/kms_cursor_crc.c | 238
-rw-r--r--  tests/kms_cursor_edge_walk.c | 32
-rw-r--r--  tests/kms_dp_dsc.c | 1
-rwxr-xr-x  tests/kms_flip.c | 35
-rw-r--r--  tests/kms_flip_tiling.c | 8
-rw-r--r--  tests/kms_force_connector_basic.c | 22
-rw-r--r--  tests/kms_frontbuffer_tracking.c | 62
-rw-r--r--  tests/kms_hdmi_inject.c | 102
-rw-r--r--  tests/kms_mmap_write_crc.c | 10
-rw-r--r--  tests/kms_plane.c | 71
-rw-r--r--  tests/kms_plane_lowres.c | 2
-rw-r--r--  tests/kms_plane_multiple.c | 114
-rw-r--r--  tests/kms_plane_scaling.c | 32
-rw-r--r--  tests/kms_psr.c | 25
-rw-r--r--  tests/kms_psr2_su.c | 6
-rw-r--r--  tests/kms_rotation_crc.c | 13
-rw-r--r--  tests/kms_selftest.c | 24
-rw-r--r--  tests/kms_setmode.c | 44
-rw-r--r--  tests/kms_tv_load_detect.c | 6
-rw-r--r--  tests/meson.build | 18
-rw-r--r--  tests/panfrost_gem_new.c | 90
-rw-r--r--  tests/panfrost_get_param.c | 73
-rw-r--r--  tests/panfrost_prime.c | 79
-rw-r--r--  tests/panfrost_submit.c | 181
-rw-r--r--  tests/perf.c | 10
-rw-r--r--  tests/perf_pmu.c | 110
-rw-r--r--  tests/prime_mmap_coherency.c | 6
-rw-r--r--  tests/prime_mmap_kms.c | 2
-rw-r--r--  tests/prime_vgem.c | 28
-rw-r--r--  tests/testdisplay.c | 232
-rw-r--r--  tools/intel_reg.c | 4
-rw-r--r--  tools/intel_vbt_decode.c | 6
-rw-r--r--  tools/intel_vbt_defs.h | 657
-rw-r--r--  tools/meson.build | 6
178 files changed, 15694 insertions, 3919 deletions
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index aee90a27..893dc988 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -2,14 +2,13 @@ image: $CI_REGISTRY/$CI_PROJECT_PATH/igt-fedora:latest
variables:
MESON_OPTIONS: >
-Dwith_libdrm=intel,nouveau,amdgpu
- -Dbuild_overlay=true
- -Dbuild_audio=true
- -Dbuild_chamelium=true
- -Dwith_valgrind=true
- -Dbuild_man=true
- -Dbuild_tests=true
- -Dbuild_runner=true
- -Dwith_libunwind=true
+ -Dbuild_overlay=enabled
+ -Dbuild_chamelium=enabled
+ -Dwith_valgrind=enabled
+ -Dbuild_man=enabled
+ -Dbuild_tests=enabled
+ -Dbuild_runner=enabled
+ -Dwith_libunwind=enabled
LANG: "C.UTF-8"
stages:
@@ -69,6 +68,16 @@ build:tests-debian-meson:
paths:
- meson-test-list.txt
+build:tests-debian-minimal:
+ image: $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian-minimal:latest
+ stage: build
+ script:
+ - meson -Dbuild_tests=disabled -Dwith_libdrm="" build
+ - ninja -C build
+ artifacts:
+ paths:
+ - build
+
build:tests-debian-meson-armhf:
image: $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian-armhf:latest
stage: build
@@ -118,6 +127,13 @@ test:ninja-test-clang:
stage: test
script: ninja -C build test
+test:ninja-test-minimal:
+ image: $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian-minimal:latest
+ dependencies:
+ - build:tests-debian-minimal
+ stage: test
+ script: ninja -C build test
+
test:ninja-test-arm64:
image: $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian-arm64:latest
dependencies:
@@ -151,7 +167,7 @@ test:test-list-diff:
- build:tests-debian-autotools
- build:tests-debian-meson
stage: test
- script: diff <(sed "s/ /\n/g" meson-test-list.txt| grep -v 'vc4\|v3d' | sort) <(sed "s/ /\n/g" autotools-test-list.txt | sort)
+ script: diff <(sed "s/ /\n/g" meson-test-list.txt| grep -v 'vc4\|v3d\|panfrost' | sort) <(sed "s/ /\n/g" autotools-test-list.txt | sort)
################### DEPLOY #########################
@@ -176,6 +192,7 @@ containers:igt-debian:
image: docker:stable
only:
changes:
+ - Dockerfile.debian-minimal
- Dockerfile.debian
- .gitlab-ci.yml
services:
@@ -185,7 +202,9 @@ containers:igt-debian:
DOCKER_DRIVER: overlay2
script:
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
+ - docker build -t $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian-minimal -t igt-debian-minimal -f Dockerfile.debian-minimal .
- docker build -t $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian -f Dockerfile.debian .
+ - docker push $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian-minimal
- docker push $CI_REGISTRY/$CI_PROJECT_PATH/igt-debian
containers:igt-debian-armhf:
diff --git a/Dockerfile.debian b/Dockerfile.debian
index b9c3be39..717630e1 100644
--- a/Dockerfile.debian
+++ b/Dockerfile.debian
@@ -1,36 +1,18 @@
-FROM debian:stretch-backports
+FROM igt-debian-minimal
RUN apt-get update
RUN apt-get install -y \
- gcc \
- flex \
- bison \
- pkg-config \
- libpciaccess-dev \
- libkmod-dev \
- libprocps-dev \
libunwind-dev \
- libdw-dev \
- zlib1g-dev \
- liblzma-dev \
- libcairo-dev \
- libpixman-1-dev \
- libudev-dev \
libgsl-dev \
libasound2-dev \
libxmlrpc-core-c3-dev \
libjson-c-dev \
libcurl4-openssl-dev \
- libxrandr-dev \
- libxv-dev \
- x11proto-dri2-dev \
python-docutils \
valgrind \
peg
RUN apt-get install -t stretch-backports -y \
- meson \
- libdrm-dev \
libdrm-intel1
# autotools build deps
diff --git a/Dockerfile.debian-minimal b/Dockerfile.debian-minimal
new file mode 100644
index 00000000..bbe70bed
--- /dev/null
+++ b/Dockerfile.debian-minimal
@@ -0,0 +1,24 @@
+FROM debian:stretch-backports
+
+RUN apt-get update
+RUN apt-get install -y \
+ gcc \
+ flex \
+ bison \
+ pkg-config \
+ libpciaccess-dev \
+ libkmod-dev \
+ libprocps-dev \
+ libdw-dev \
+ zlib1g-dev \
+ liblzma-dev \
+ libcairo-dev \
+ libpixman-1-dev \
+ libudev-dev \
+ libxrandr-dev \
+ libxv-dev \
+ x11proto-dri2-dev
+
+RUN apt-get install -t stretch-backports -y \
+ meson \
+ libdrm-dev
diff --git a/Dockerfile.fedora b/Dockerfile.fedora
index 0795de0e..6686e587 100644
--- a/Dockerfile.fedora
+++ b/Dockerfile.fedora
@@ -1,30 +1,28 @@
-FROM fedora:28
+FROM fedora:30
-RUN dnf install -y gcc \
- flex \
- meson \
- bison \
- gtk-doc \
- xdotool \
- gsl-devel \
- kmod-devel \
- glib2-devel \
- cairo-devel \
- ninja-build \
- procps-devel \
- pixman-devel \
- json-c-devel \
- libdrm-devel \
- libudev-devel \
- xmlrpc-c-devel \
- elfutils-devel \
- libunwind-devel \
- python-docutils \
- libpciaccess-devel \
- alsa-lib-devel \
- valgrind-devel \
- libXrandr-devel \
- libXv-devel
+RUN dnf install -y \
+ gcc flex bison meson ninja-build xdotool \
+ 'pkgconfig(libdrm)' \
+ 'pkgconfig(pciaccess)' \
+ 'pkgconfig(libkmod)' \
+ 'pkgconfig(libprocps)' \
+ 'pkgconfig(libunwind)' \
+ 'pkgconfig(libdw)' \
+ 'pkgconfig(pixman-1)' \
+ 'pkgconfig(valgrind)' \
+ 'pkgconfig(cairo)' \
+ 'pkgconfig(libudev)' \
+ 'pkgconfig(glib-2.0)' \
+ 'pkgconfig(gsl)' \
+ 'pkgconfig(alsa)' \
+ 'pkgconfig(xmlrpc)' \
+ 'pkgconfig(xmlrpc_util)' \
+ 'pkgconfig(xmlrpc_client)' \
+ 'pkgconfig(json-c)' \
+ 'pkgconfig(gtk-doc)' \
+ 'pkgconfig(xv)' \
+ 'pkgconfig(xrandr)' \
+ python3-docutils
# We need peg to build overlay
RUN dnf install -y make
@@ -40,7 +38,7 @@ RUN dnf install -y clang
# Meson version switching shenanigans
WORKDIR /usr/src
-RUN curl -O https://files.pythonhosted.org/packages/17/d0/0fe98a9557a2f07dbe6f99ef57f2bc37450b641e1f6ceae9ce04c3c845dd/meson-0.46.0.tar.gz
+RUN curl -O https://files.pythonhosted.org/packages/c0/9b/44cdb8adcbb186be6cba5c93718d0c68f177b0e8082ae00cafa63a1d3535/meson-0.47.0.tar.gz
# Cleanup workdir
WORKDIR /
diff --git a/METADATA b/METADATA
index 9d2468af..69d80cef 100644
--- a/METADATA
+++ b/METADATA
@@ -1,14 +1,15 @@
name: "igt-gpu-tools"
-description:
- "IGT GPU Tools is a collection of tools for development and testing of "
- "Linux kernel DRM drivers."
-
+description: "IGT GPU Tools is a collection of tools for development and testing of Linux kernel DRM drivers."
third_party {
url {
type: GIT
value: "https://gitlab.freedesktop.org/drm/igt-gpu-tools"
}
- version: "f052e49a43cc9704ea5f240df15dd9d3dfed68ab"
- last_upgrade_date { year: 2019 month: 5 day: 6 }
+ version: "22850c1906550fb97b405c019275dcfb34be8cf7"
license_type: NOTICE
+ last_upgrade_date {
+ year: 2019
+ month: 6
+ day: 20
+ }
}
diff --git a/NEWS b/NEWS
index ffddc096..d94374fa 100644
--- a/NEWS
+++ b/NEWS
@@ -1,9 +1,39 @@
-Unreleased
-----------
+Release 1.24 (2019-06-20)
+-------------------------
-General changes:
+- Bumped required meson version to 0.47. (Arkadiusz Hiler)
+
+- All the meson build options that used auto, true and false are now first
+ class 'feature' options taking auto, enabled and disabled. (Arkadiusz Hiler)
+
+- Piglit as the primary test executor has been replaced by the mostly
+ drop-in compatible igt_runner. (Petri Latvala)
+
+- Stack traces now contain source file names and line numbers, using
+ libdw. (Maarten Lankhorst)
+
+- Pixman dependency is now mandatory. (Maxime Ripard)
+
+- The project has moved to GitLab and uses GitLab CI to build-test and
+ sanity-test each commit on various distributions and architectures.
+ (Arkadiusz Hiler et al)
+
+- Shader debugger removed due to lack of use and accumulation of
+ bitrot. (Arkadiusz Hiler)
+
+- Added support for testing DP/HDMI audio with the Chamelium device,
+ dropping the audio tests that required exotic custom hardware to
+ execute. (Simon Ser)
+
+- Autotools support dropped for various parts of IGT (assembler,
+ documentation, etc). (Daniel Vetter)
+
+- intel-gpu-top can now output data to stdout or a log file for
+ noninteractive use. (Tvrtko Ursulin)
+
+
+And many other bug fixes, improvements, cleanups and new tests.
- - Bumped required meson version to 0.46. (Arkadiusz Hiler)
Release 1.23 (2018-07-18)
-------------------------
diff --git a/benchmarks/gem_syslatency.c b/benchmarks/gem_syslatency.c
index ba5d32a9..7671dc43 100644
--- a/benchmarks/gem_syslatency.c
+++ b/benchmarks/gem_syslatency.c
@@ -44,7 +44,6 @@
#include <linux/unistd.h>
-#define gettid() syscall(__NR_gettid)
#define sigev_notify_thread_id _sigev_un._tid
static volatile int done;
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index afb9644d..a76fdbfe 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -41,7 +41,6 @@
#include <limits.h>
#include <pthread.h>
-
#include "intel_chipset.h"
#include "intel_reg.h"
#include "drm.h"
@@ -57,10 +56,8 @@
#include "ewma.h"
-#define LOCAL_I915_EXEC_FENCE_IN (1<<16)
-#define LOCAL_I915_EXEC_FENCE_OUT (1<<17)
-
enum intel_engine_id {
+ DEFAULT,
RCS,
BCS,
VCS,
@@ -85,12 +82,18 @@ enum w_type
SW_FENCE,
SW_FENCE_SIGNAL,
CTX_PRIORITY,
- PREEMPTION
+ PREEMPTION,
+ ENGINE_MAP,
+ LOAD_BALANCE,
+ BOND,
+ TERMINATE,
+ SSEU
};
struct deps
{
int nr;
+ bool submit_fence;
int *list;
};
@@ -98,6 +101,12 @@ struct w_arg {
char *filename;
char *desc;
int prio;
+ bool sseu;
+};
+
+struct bond {
+ uint64_t mask;
+ enum intel_engine_id master;
};
struct w_step
@@ -107,6 +116,7 @@ struct w_step
unsigned int context;
unsigned int engine;
struct duration duration;
+ bool unbound_duration;
struct deps data_deps;
struct deps fence_deps;
int emit_fence;
@@ -118,6 +128,16 @@ struct w_step
int throttle;
int fence_signal;
int priority;
+ struct {
+ unsigned int engine_map_count;
+ enum intel_engine_id *engine_map;
+ };
+ bool load_balance;
+ struct {
+ uint64_t bond_mask;
+ enum intel_engine_id bond_master;
+ };
+ int sseu;
};
/* Implementation details */
@@ -128,7 +148,7 @@ struct w_step
struct drm_i915_gem_execbuffer2 eb;
struct drm_i915_gem_exec_object2 *obj;
- struct drm_i915_gem_relocation_entry reloc[4];
+ struct drm_i915_gem_relocation_entry reloc[5];
unsigned long bb_sz;
uint32_t bb_handle;
uint32_t *seqno_value;
@@ -138,10 +158,24 @@ struct w_step
uint32_t *rt1_address;
uint32_t *latch_value;
uint32_t *latch_address;
+ uint32_t *recursive_bb_start;
};
DECLARE_EWMA(uint64_t, rt, 4, 2)
+struct ctx {
+ uint32_t id;
+ int priority;
+ unsigned int engine_map_count;
+ enum intel_engine_id *engine_map;
+ unsigned int bond_count;
+ struct bond *bonds;
+ bool targets_instance;
+ bool wants_balance;
+ unsigned int static_vcs;
+ uint64_t sseu;
+};
+
struct workload
{
unsigned int id;
@@ -149,6 +183,7 @@ struct workload
unsigned int nr_steps;
struct w_step *steps;
int prio;
+ bool sseu;
pthread_t thread;
bool run;
@@ -158,16 +193,13 @@ struct workload
unsigned int flags;
bool print_stats;
+ uint32_t bb_prng;
uint32_t prng;
struct timespec repeat_start;
unsigned int nr_ctxs;
- struct {
- uint32_t id;
- int priority;
- unsigned int static_vcs;
- } *ctx_list;
+ struct ctx *ctx_list;
int sync_timeline;
uint32_t sync_seqno;
@@ -199,20 +231,25 @@ struct workload
int fd;
bool first;
unsigned int num_engines;
- unsigned int engine_map[5];
+ unsigned int engine_map[NUM_ENGINES];
uint64_t t_prev;
- uint64_t prev[5];
- double busy[5];
+ uint64_t prev[NUM_ENGINES];
+ double busy[NUM_ENGINES];
} busy_balancer;
};
static const unsigned int nop_calibration_us = 1000;
static unsigned long nop_calibration;
+static unsigned int master_prng;
+
static unsigned int context_vcs_rr;
static int verbose = 1;
static int fd;
+static struct drm_i915_gem_context_param_sseu device_sseu = {
+ .slice_mask = -1 /* Force read on first use. */
+};
#define SWAPVCS (1<<0)
#define SEQNO (1<<1)
@@ -224,6 +261,8 @@ static int fd;
#define HEARTBEAT (1<<7)
#define GLOBAL_BALANCE (1<<8)
#define DEPSYNC (1<<9)
+#define I915 (1<<10)
+#define SSEU (1<<11)
#define SEQNO_IDX(engine) ((engine) * 16)
#define SEQNO_OFFSET(engine) (SEQNO_IDX(engine) * sizeof(uint32_t))
@@ -232,6 +271,7 @@ static int fd;
#define REG(x) (volatile uint32_t *)((volatile char *)igt_global_mmio + x)
static const char *ring_str_map[NUM_ENGINES] = {
+ [DEFAULT] = "DEFAULT",
[RCS] = "RCS",
[BCS] = "BCS",
[VCS] = "VCS",
@@ -252,17 +292,23 @@ parse_dependencies(unsigned int nr_steps, struct w_step *w, char *_desc)
w->data_deps.list == w->fence_deps.list);
while ((token = strtok_r(tstart, "/", &tctx)) != NULL) {
+ bool submit_fence = false;
char *str = token;
struct deps *deps;
int dep;
tstart = NULL;
- if (strlen(token) > 1 && token[0] == 'f') {
+ if (str[0] == '-' || (str[0] >= '0' && str[0] <= '9')) {
+ deps = &w->data_deps;
+ } else {
+ if (str[0] == 's')
+ submit_fence = true;
+ else if (str[0] != 'f')
+ return -1;
+
deps = &w->fence_deps;
str++;
- } else {
- deps = &w->data_deps;
}
dep = atoi(str);
@@ -280,6 +326,7 @@ parse_dependencies(unsigned int nr_steps, struct w_step *w, char *_desc)
sizeof(*deps->list) * deps->nr);
igt_assert(deps->list);
deps->list[deps->nr - 1] = dep;
+ deps->submit_fence = submit_fence;
}
}
@@ -288,6 +335,343 @@ parse_dependencies(unsigned int nr_steps, struct w_step *w, char *_desc)
return 0;
}
+static void __attribute__((format(printf, 1, 2)))
+wsim_err(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (!verbose)
+ return;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
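+/*
+ * Log a parse error (when verbose) and abort the calling parser function by
+ * returning NULL.
+ */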
+#define check_arg(cond, fmt, ...) \
+{ \
+ if (cond) { \
+ wsim_err(fmt, __VA_ARGS__); \
+ return NULL; \
+ } \
+}
+
+static int str_to_engine(const char *str)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ring_str_map); i++) {
+ if (!strcasecmp(str, ring_str_map[i]))
+ return i;
+ }
+
+ return -1;
+}
+
+static bool __engines_queried;
+static unsigned int __num_engines;
+static struct i915_engine_class_instance *__engines;
+
+static int
+__i915_query(int i915, struct drm_i915_query *q)
+{
+ if (igt_ioctl(i915, DRM_IOCTL_I915_QUERY, q))
+ return -errno;
+ return 0;
+}
+
+static int
+__i915_query_items(int i915, struct drm_i915_query_item *items, uint32_t n_items)
+{
+ struct drm_i915_query q = {
+ .num_items = n_items,
+ .items_ptr = to_user_pointer(items),
+ };
+ return __i915_query(i915, &q);
+}
+
+static void
+i915_query_items(int i915, struct drm_i915_query_item *items, uint32_t n_items)
+{
+ igt_assert_eq(__i915_query_items(i915, items, n_items), 0);
+}
+
+static bool has_engine_query(int i915)
+{
+ struct drm_i915_query_item item = {
+ .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ };
+
+ return __i915_query_items(i915, &item, 1) == 0 && item.length > 0;
+}
+
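+/*
+ * Enumerate the physical engines once, preferring DRM_I915_QUERY_ENGINE_INFO
+ * and falling back to the legacy per-ring feature checks on older kernels.
+ */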
+static void query_engines(void)
+{
+ struct i915_engine_class_instance *engines;
+ unsigned int num;
+
+ if (__engines_queried)
+ return;
+
+ __engines_queried = true;
+
+ if (!has_engine_query(fd)) {
+ unsigned int num_bsd = gem_has_bsd(fd) + gem_has_bsd2(fd);
+ unsigned int i = 0;
+
+ igt_assert(num_bsd);
+
+ num = 1 + num_bsd;
+
+ if (gem_has_blt(fd))
+ num++;
+
+ if (gem_has_vebox(fd))
+ num++;
+
+ engines = calloc(num,
+ sizeof(struct i915_engine_class_instance));
+ igt_assert(engines);
+
+ engines[i].engine_class = I915_ENGINE_CLASS_RENDER;
+ engines[i].engine_instance = 0;
+ i++;
+
+ if (gem_has_blt(fd)) {
+ engines[i].engine_class = I915_ENGINE_CLASS_COPY;
+ engines[i].engine_instance = 0;
+ i++;
+ }
+
+ if (gem_has_bsd(fd)) {
+ engines[i].engine_class = I915_ENGINE_CLASS_VIDEO;
+ engines[i].engine_instance = 0;
+ i++;
+ }
+
+ if (gem_has_bsd2(fd)) {
+ engines[i].engine_class = I915_ENGINE_CLASS_VIDEO;
+ engines[i].engine_instance = 1;
+ i++;
+ }
+
+ if (gem_has_vebox(fd)) {
+ engines[i].engine_class =
+ I915_ENGINE_CLASS_VIDEO_ENHANCE;
+ engines[i].engine_instance = 0;
+ i++;
+ }
+ } else {
+ struct drm_i915_query_engine_info *engine_info;
+ struct drm_i915_query_item item = {
+ .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ };
+ const unsigned int sz = 4096;
+ unsigned int i;
+
+ engine_info = malloc(sz);
+ igt_assert(engine_info);
+ memset(engine_info, 0, sz);
+
+ item.data_ptr = to_user_pointer(engine_info);
+ item.length = sz;
+
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length > 0);
+ igt_assert(item.length <= sz);
+
+ num = engine_info->num_engines;
+
+ engines = calloc(num,
+ sizeof(struct i915_engine_class_instance));
+ igt_assert(engines);
+
+ for (i = 0; i < num; i++) {
+ struct drm_i915_engine_info *engine =
+ (struct drm_i915_engine_info *)&engine_info->engines[i];
+
+ engines[i] = engine->engine;
+ }
+ }
+
+ __engines = engines;
+ __num_engines = num;
+}
+
+static unsigned int num_engines_in_class(enum intel_engine_id class)
+{
+ unsigned int i, count = 0;
+
+ igt_assert(class == VCS);
+
+ query_engines();
+
+ for (i = 0; i < __num_engines; i++) {
+ if (__engines[i].engine_class == I915_ENGINE_CLASS_VIDEO)
+ count++;
+ }
+
+ igt_assert(count);
+ return count;
+}
+
+static void
+fill_engines_class(struct i915_engine_class_instance *ci,
+ enum intel_engine_id class)
+{
+ unsigned int i, j = 0;
+
+ igt_assert(class == VCS);
+
+ query_engines();
+
+ for (i = 0; i < __num_engines; i++) {
+ if (__engines[i].engine_class != I915_ENGINE_CLASS_VIDEO)
+ continue;
+
+ ci[j].engine_class = __engines[i].engine_class;
+ ci[j].engine_instance = __engines[i].engine_instance;
+ j++;
+ }
+}
+
+static void
+fill_engines_id_class(enum intel_engine_id *list,
+ enum intel_engine_id class)
+{
+ enum intel_engine_id engine = VCS1;
+ unsigned int i, j = 0;
+
+ igt_assert(class == VCS);
+ igt_assert(num_engines_in_class(VCS) <= 2);
+
+ query_engines();
+
+ for (i = 0; i < __num_engines; i++) {
+ if (__engines[i].engine_class != I915_ENGINE_CLASS_VIDEO)
+ continue;
+
+ list[j++] = engine++;
+ }
+}
+
+static unsigned int
+find_physical_instance(enum intel_engine_id class, unsigned int logical)
+{
+ unsigned int i, j = 0;
+
+ igt_assert(class == VCS);
+
+ for (i = 0; i < __num_engines; i++) {
+ if (__engines[i].engine_class != I915_ENGINE_CLASS_VIDEO)
+ continue;
+
+ /* Map logical to physical instances. */
+ if (logical == j++)
+ return __engines[i].engine_instance;
+ }
+
+ igt_assert(0);
+ return 0;
+}
+
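+/* Translate a symbolic engine id into a uAPI class/instance pair, mapping
+ * logical VCS instances onto physical ones. */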
+static struct i915_engine_class_instance
+get_engine(enum intel_engine_id engine)
+{
+ struct i915_engine_class_instance ci;
+
+ query_engines();
+
+ switch (engine) {
+ case RCS:
+ ci.engine_class = I915_ENGINE_CLASS_RENDER;
+ ci.engine_instance = 0;
+ break;
+ case BCS:
+ ci.engine_class = I915_ENGINE_CLASS_COPY;
+ ci.engine_instance = 0;
+ break;
+ case VCS1:
+ case VCS2:
+ ci.engine_class = I915_ENGINE_CLASS_VIDEO;
+ ci.engine_instance = find_physical_instance(VCS, engine - VCS1);
+ break;
+ case VECS:
+ ci.engine_class = I915_ENGINE_CLASS_VIDEO_ENHANCE;
+ ci.engine_instance = 0;
+ break;
+ default:
+ igt_assert(0);
+ };
+
+ return ci;
+}
+
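+/*
+ * Parse a '|'-separated engine list into the step's engine map, expanding the
+ * VCS class name to all physical video engines.
+ */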
+static int parse_engine_map(struct w_step *step, const char *_str)
+{
+ char *token, *tctx = NULL, *tstart = (char *)_str;
+
+ while ((token = strtok_r(tstart, "|", &tctx))) {
+ enum intel_engine_id engine;
+ unsigned int add;
+
+ tstart = NULL;
+
+ if (!strcmp(token, "DEFAULT"))
+ return -1;
+
+ engine = str_to_engine(token);
+ if ((int)engine < 0)
+ return -1;
+
+ if (engine != VCS && engine != VCS1 && engine != VCS2 &&
+ engine != RCS)
+ return -1; /* TODO */
+
+ add = engine == VCS ? num_engines_in_class(VCS) : 1;
+ step->engine_map_count += add;
+ step->engine_map = realloc(step->engine_map,
+ step->engine_map_count *
+ sizeof(step->engine_map[0]));
+
+ if (engine != VCS)
+ step->engine_map[step->engine_map_count - add] = engine;
+ else
+ fill_engines_id_class(&step->engine_map[step->engine_map_count - add], VCS);
+ }
+
+ return 0;
+}
+
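+/* Convert a '|'-separated list of explicit engines into a bond sibling mask;
+ * DEFAULT and the VCS class name are rejected. */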
+static uint64_t engine_list_mask(const char *_str)
+{
+ uint64_t mask = 0;
+
+ char *token, *tctx = NULL, *tstart = (char *)_str;
+
+ while ((token = strtok_r(tstart, "|", &tctx))) {
+ enum intel_engine_id engine = str_to_engine(token);
+
+ if ((int)engine < 0 || engine == DEFAULT || engine == VCS)
+ return 0;
+
+ mask |= 1 << engine;
+
+ tstart = NULL;
+ }
+
+ return mask;
+}
+
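+/*
+ * Parse a single integer step field: validate it with _COND_, store it in the
+ * given w_step member and jump to add_step.
+ */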
+#define int_field(_STEP_, _FIELD_, _COND_, _ERR_) \
+ if ((field = strtok_r(fstart, ".", &fctx))) { \
+ tmp = atoi(field); \
+ check_arg(_COND_, _ERR_, nr_steps); \
+ step.type = _STEP_; \
+ step._FIELD_ = tmp; \
+ goto add_step; \
+ } \
+
static struct workload *
parse_workload(struct w_arg *arg, unsigned int flags, struct workload *app_w)
{
@@ -303,7 +687,7 @@ parse_workload(struct w_arg *arg, unsigned int flags, struct workload *app_w)
igt_assert(desc);
- while ((_token = strtok_r(tstart, ",", &tctx)) != NULL) {
+ while ((_token = strtok_r(tstart, ",", &tctx))) {
tstart = NULL;
token = strdup(_token);
igt_assert(token);
@@ -311,65 +695,30 @@ parse_workload(struct w_arg *arg, unsigned int flags, struct workload *app_w)
valid = 0;
memset(&step, 0, sizeof(step));
- if ((field = strtok_r(fstart, ".", &fctx)) != NULL) {
+ if ((field = strtok_r(fstart, ".", &fctx))) {
fstart = NULL;
if (!strcmp(field, "d")) {
- if ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
- tmp = atoi(field);
- if (tmp <= 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid delay at step %u!\n",
- nr_steps);
- return NULL;
- }
-
- step.type = DELAY;
- step.delay = tmp;
- goto add_step;
- }
+ int_field(DELAY, delay, tmp <= 0,
+ "Invalid delay at step %u!\n");
} else if (!strcmp(field, "p")) {
- if ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
- tmp = atoi(field);
- if (tmp <= 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid period at step %u!\n",
- nr_steps);
- return NULL;
- }
-
- step.type = PERIOD;
- step.period = tmp;
- goto add_step;
- }
+ int_field(PERIOD, period, tmp <= 0,
+ "Invalid period at step %u!\n");
} else if (!strcmp(field, "P")) {
unsigned int nr = 0;
- while ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
+ while ((field = strtok_r(fstart, ".", &fctx))) {
tmp = atoi(field);
- if (tmp <= 0 && nr == 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid context at step %u!\n",
- nr_steps);
- return NULL;
- }
-
- if (nr == 0) {
+ check_arg(nr == 0 && tmp <= 0,
+ "Invalid context at step %u!\n",
+ nr_steps);
+ check_arg(nr > 1,
+ "Invalid priority format at step %u!\n",
+ nr_steps);
+
+ if (nr == 0)
step.context = tmp;
- } else if (nr == 1) {
+ else
step.priority = tmp;
- } else {
- if (verbose)
- fprintf(stderr,
- "Invalid priority format at step %u!\n",
- nr_steps);
- return NULL;
- }
nr++;
}
@@ -377,108 +726,150 @@ parse_workload(struct w_arg *arg, unsigned int flags, struct workload *app_w)
step.type = CTX_PRIORITY;
goto add_step;
} else if (!strcmp(field, "s")) {
- if ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
+ int_field(SYNC, target,
+ tmp >= 0 || ((int)nr_steps + tmp) < 0,
+ "Invalid sync target at step %u!\n");
+ } else if (!strcmp(field, "S")) {
+ unsigned int nr = 0;
+ while ((field = strtok_r(fstart, ".", &fctx))) {
tmp = atoi(field);
- if (tmp >= 0 ||
- ((int)nr_steps + tmp) < 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid sync target at step %u!\n",
- nr_steps);
- return NULL;
- }
+ check_arg(tmp <= 0 && nr == 0,
+ "Invalid context at step %u!\n",
+ nr_steps);
+ check_arg(nr > 1,
+ "Invalid SSEU format at step %u!\n",
+ nr_steps);
+
+ if (nr == 0)
+ step.context = tmp;
+ else if (nr == 1)
+ step.sseu = tmp;
- step.type = SYNC;
- step.target = tmp;
- goto add_step;
+ nr++;
}
+
+ step.type = SSEU;
+ goto add_step;
} else if (!strcmp(field, "t")) {
- if ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
+ int_field(THROTTLE, throttle,
+ tmp < 0,
+ "Invalid throttle at step %u!\n");
+ } else if (!strcmp(field, "q")) {
+ int_field(QD_THROTTLE, throttle,
+ tmp < 0,
+ "Invalid qd throttle at step %u!\n");
+ } else if (!strcmp(field, "a")) {
+ int_field(SW_FENCE_SIGNAL, target,
+ tmp >= 0,
+ "Invalid sw fence signal at step %u!\n");
+ } else if (!strcmp(field, "f")) {
+ step.type = SW_FENCE;
+ goto add_step;
+ } else if (!strcmp(field, "M")) {
+ unsigned int nr = 0;
+ while ((field = strtok_r(fstart, ".", &fctx))) {
tmp = atoi(field);
- if (tmp < 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid throttle at step %u!\n",
- nr_steps);
- return NULL;
+ check_arg(nr == 0 && tmp <= 0,
+ "Invalid context at step %u!\n",
+ nr_steps);
+ check_arg(nr > 1,
+ "Invalid engine map format at step %u!\n",
+ nr_steps);
+
+ if (nr == 0) {
+ step.context = tmp;
+ } else {
+ tmp = parse_engine_map(&step,
+ field);
+ check_arg(tmp < 0,
+ "Invalid engine map list at step %u!\n",
+ nr_steps);
}
- step.type = THROTTLE;
- step.throttle = tmp;
- goto add_step;
+ nr++;
}
- } else if (!strcmp(field, "q")) {
- if ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
+
+ step.type = ENGINE_MAP;
+ goto add_step;
+ } else if (!strcmp(field, "T")) {
+ int_field(TERMINATE, target,
+ tmp >= 0 || ((int)nr_steps + tmp) < 0,
+ "Invalid terminate target at step %u!\n");
+ } else if (!strcmp(field, "X")) {
+ unsigned int nr = 0;
+ while ((field = strtok_r(fstart, ".", &fctx))) {
tmp = atoi(field);
- if (tmp < 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid qd throttle at step %u!\n",
- nr_steps);
- return NULL;
- }
+ check_arg(nr == 0 && tmp <= 0,
+ "Invalid context at step %u!\n",
+ nr_steps);
+ check_arg(nr == 1 && tmp < 0,
+ "Invalid preemption period at step %u!\n",
+ nr_steps);
+ check_arg(nr > 1,
+ "Invalid preemption format at step %u!\n",
+ nr_steps);
+
+ if (nr == 0)
+ step.context = tmp;
+ else
+ step.period = tmp;
- step.type = QD_THROTTLE;
- step.throttle = tmp;
- goto add_step;
+ nr++;
}
- } else if (!strcmp(field, "a")) {
- if ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
+
+ step.type = PREEMPTION;
+ goto add_step;
+ } else if (!strcmp(field, "B")) {
+ unsigned int nr = 0;
+ while ((field = strtok_r(fstart, ".", &fctx))) {
tmp = atoi(field);
- if (tmp >= 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid sw fence signal at step %u!\n",
- nr_steps);
- return NULL;
- }
+ check_arg(nr == 0 && tmp <= 0,
+ "Invalid context at step %u!\n",
+ nr_steps);
+ check_arg(nr > 0,
+ "Invalid load balance format at step %u!\n",
+ nr_steps);
+
+ step.context = tmp;
+ step.load_balance = true;
- step.type = SW_FENCE_SIGNAL;
- step.target = tmp;
- goto add_step;
+ nr++;
}
- } else if (!strcmp(field, "f")) {
- step.type = SW_FENCE;
+
+ step.type = LOAD_BALANCE;
goto add_step;
- } else if (!strcmp(field, "X")) {
+ } else if (!strcmp(field, "b")) {
unsigned int nr = 0;
- while ((field = strtok_r(fstart, ".", &fctx)) !=
- NULL) {
- tmp = atoi(field);
- if (tmp <= 0 && nr == 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid context at step %u!\n",
- nr_steps);
- return NULL;
- } else if (tmp < 0 && nr == 1) {
- if (verbose)
- fprintf(stderr,
- "Invalid preemption period at step %u!\n",
- nr_steps);
- return NULL;
- }
+ while ((field = strtok_r(fstart, ".", &fctx))) {
+ check_arg(nr > 2,
+ "Invalid bond format at step %u!\n",
+ nr_steps);
if (nr == 0) {
+ tmp = atoi(field);
step.context = tmp;
+ check_arg(tmp <= 0,
+ "Invalid context at step %u!\n",
+ nr_steps);
} else if (nr == 1) {
- step.period = tmp;
- } else {
- if (verbose)
- fprintf(stderr,
- "Invalid preemption format at step %u!\n",
- nr_steps);
- return NULL;
+ step.bond_mask = engine_list_mask(field);
+ check_arg(step.bond_mask == 0,
+ "Invalid siblings list at step %u!\n",
+ nr_steps);
+ } else if (nr == 2) {
+ tmp = str_to_engine(field);
+ check_arg(tmp <= 0 ||
+ tmp == VCS ||
+ tmp == DEFAULT,
+ "Invalid master engine at step %u!\n",
+ nr_steps);
+ step.bond_master = tmp;
}
nr++;
}
- step.type = PREEMPTION;
+ step.type = BOND;
goto add_step;
}
@@ -491,113 +882,87 @@ parse_workload(struct w_arg *arg, unsigned int flags, struct workload *app_w)
}
tmp = atoi(field);
- if (tmp < 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid ctx id at step %u!\n",
- nr_steps);
- return NULL;
- }
+ check_arg(tmp < 0, "Invalid ctx id at step %u!\n",
+ nr_steps);
step.context = tmp;
valid++;
}
- if ((field = strtok_r(fstart, ".", &fctx)) != NULL) {
- unsigned int old_valid = valid;
-
+ if ((field = strtok_r(fstart, ".", &fctx))) {
fstart = NULL;
- for (i = 0; i < ARRAY_SIZE(ring_str_map); i++) {
- if (!strcasecmp(field, ring_str_map[i])) {
- step.engine = i;
- if (step.engine == BCS)
- bcs_used = true;
- valid++;
- break;
- }
- }
+ i = str_to_engine(field);
+ check_arg(i < 0,
+ "Invalid engine id at step %u!\n", nr_steps);
- if (old_valid == valid) {
- if (verbose)
- fprintf(stderr,
- "Invalid engine id at step %u!\n",
- nr_steps);
- return NULL;
- }
+ valid++;
+
+ step.engine = i;
+
+ if (step.engine == BCS)
+ bcs_used = true;
}
- if ((field = strtok_r(fstart, ".", &fctx)) != NULL) {
+ if ((field = strtok_r(fstart, ".", &fctx))) {
char *sep = NULL;
long int tmpl;
fstart = NULL;
- tmpl = strtol(field, &sep, 10);
- if (tmpl <= 0 || tmpl == LONG_MIN || tmpl == LONG_MAX) {
- if (verbose)
- fprintf(stderr,
- "Invalid duration at step %u!\n",
+ if (field[0] == '*') {
+ check_arg(intel_gen(intel_get_drm_devid(fd)) < 8,
+ "Infinite batch at step %u needs Gen8+!\n",
+ nr_steps);
+ step.unbound_duration = true;
+ } else {
+ tmpl = strtol(field, &sep, 10);
+ check_arg(tmpl <= 0 || tmpl == LONG_MIN ||
+ tmpl == LONG_MAX,
+ "Invalid duration at step %u!\n",
+ nr_steps);
+ step.duration.min = tmpl;
+
+ if (sep && *sep == '-') {
+ tmpl = strtol(sep + 1, NULL, 10);
+ check_arg(tmpl <= 0 ||
+ tmpl <= step.duration.min ||
+ tmpl == LONG_MIN ||
+ tmpl == LONG_MAX,
+ "Invalid duration range at step %u!\n",
nr_steps);
- return NULL;
- }
- step.duration.min = tmpl;
-
- if (sep && *sep == '-') {
- tmpl = strtol(sep + 1, NULL, 10);
- if (tmpl <= 0 || tmpl <= step.duration.min ||
- tmpl == LONG_MIN || tmpl == LONG_MAX) {
- if (verbose)
- fprintf(stderr,
- "Invalid duration range at step %u!\n",
- nr_steps);
- return NULL;
+ step.duration.max = tmpl;
+ } else {
+ step.duration.max = step.duration.min;
}
- step.duration.max = tmpl;
- } else {
- step.duration.max = step.duration.min;
}
valid++;
}
- if ((field = strtok_r(fstart, ".", &fctx)) != NULL) {
+ if ((field = strtok_r(fstart, ".", &fctx))) {
fstart = NULL;
tmp = parse_dependencies(nr_steps, &step, field);
- if (tmp < 0) {
- if (verbose)
- fprintf(stderr,
- "Invalid dependency at step %u!\n",
- nr_steps);
- return NULL;
- }
+ check_arg(tmp < 0,
+ "Invalid dependency at step %u!\n", nr_steps);
valid++;
}
- if ((field = strtok_r(fstart, ".", &fctx)) != NULL) {
+ if ((field = strtok_r(fstart, ".", &fctx))) {
fstart = NULL;
- if (strlen(field) != 1 ||
- (field[0] != '0' && field[0] != '1')) {
- if (verbose)
- fprintf(stderr,
- "Invalid wait boolean at step %u!\n",
- nr_steps);
- return NULL;
- }
+ check_arg(strlen(field) != 1 ||
+ (field[0] != '0' && field[0] != '1'),
+ "Invalid wait boolean at step %u!\n",
+ nr_steps);
step.sync = field[0] - '0';
valid++;
}
- if (valid != 5) {
- if (verbose)
- fprintf(stderr, "Invalid record at step %u!\n",
- nr_steps);
- return NULL;
- }
+ check_arg(valid != 5, "Invalid record at step %u!\n", nr_steps);
step.type = BATCH;
@@ -632,6 +997,7 @@ add_step:
wrk->nr_steps = nr_steps;
wrk->steps = steps;
wrk->prio = arg->prio;
+ wrk->sseu = arg->sseu;
free(desc);
@@ -642,15 +1008,10 @@ add_step:
for (i = 0; i < nr_steps; i++) {
for (j = 0; j < steps[i].fence_deps.nr; j++) {
tmp = steps[i].idx + steps[i].fence_deps.list[j];
- if (tmp < 0 || tmp >= i ||
- (steps[tmp].type != BATCH &&
- steps[tmp].type != SW_FENCE)) {
- if (verbose)
- fprintf(stderr,
- "Invalid dependency target %u!\n",
- i);
- return NULL;
- }
+ check_arg(tmp < 0 || tmp >= i ||
+ (steps[tmp].type != BATCH &&
+ steps[tmp].type != SW_FENCE),
+ "Invalid dependency target %u!\n", i);
steps[tmp].emit_fence = -1;
}
}
@@ -659,14 +1020,9 @@ add_step:
for (i = 0; i < nr_steps; i++) {
if (steps[i].type == SW_FENCE_SIGNAL) {
tmp = steps[i].idx + steps[i].target;
- if (tmp < 0 || tmp >= i ||
- steps[tmp].type != SW_FENCE) {
- if (verbose)
- fprintf(stderr,
- "Invalid sw fence target %u!\n",
- i);
- return NULL;
- }
+ check_arg(tmp < 0 || tmp >= i ||
+ steps[tmp].type != SW_FENCE,
+ "Invalid sw fence target %u!\n", i);
}
}
@@ -687,6 +1043,7 @@ clone_workload(struct workload *_wrk)
memset(wrk, 0, sizeof(*wrk));
wrk->prio = _wrk->prio;
+ wrk->sseu = _wrk->sseu;
wrk->nr_steps = _wrk->nr_steps;
wrk->steps = calloc(wrk->nr_steps, sizeof(struct w_step));
igt_assert(wrk->steps);
@@ -713,14 +1070,14 @@ clone_workload(struct workload *_wrk)
#define PAGE_SIZE (4096)
#endif
-static unsigned int get_duration(struct w_step *w)
+static unsigned int get_duration(struct workload *wrk, struct w_step *w)
{
struct duration *dur = &w->duration;
if (dur->min == dur->max)
return dur->min;
else
- return dur->min + hars_petruska_f54_1_random_unsafe() %
+ return dur->min + hars_petruska_f54_1_random(&wrk->bb_prng) %
(dur->max + 1 - dur->min);
}
@@ -739,7 +1096,7 @@ init_bb(struct w_step *w, unsigned int flags)
unsigned int i;
uint32_t *ptr;
- if (!arb_period)
+ if (w->unbound_duration || !arb_period)
return;
gem_set_domain(fd, w->bb_handle,
@@ -753,12 +1110,13 @@ init_bb(struct w_step *w, unsigned int flags)
munmap(ptr, mmap_len);
}
-static void
+static unsigned int
terminate_bb(struct w_step *w, unsigned int flags)
{
const uint32_t bbe = 0xa << 23;
unsigned long mmap_start, mmap_len;
unsigned long batch_start = w->bb_sz;
+ unsigned int r = 0;
uint32_t *ptr, *cs;
igt_assert(((flags & RT) && (flags & SEQNO)) || !(flags & RT));
@@ -769,6 +1127,9 @@ terminate_bb(struct w_step *w, unsigned int flags)
if (flags & RT)
batch_start -= 12 * sizeof(uint32_t);
+ if (w->unbound_duration)
+ batch_start -= 4 * sizeof(uint32_t); /* MI_ARB_CHK + MI_BATCH_BUFFER_START */
+
mmap_start = rounddown(batch_start, PAGE_SIZE);
mmap_len = ALIGN(w->bb_sz - mmap_start, PAGE_SIZE);
@@ -778,8 +1139,19 @@ terminate_bb(struct w_step *w, unsigned int flags)
ptr = gem_mmap__wc(fd, w->bb_handle, mmap_start, mmap_len, PROT_WRITE);
cs = (uint32_t *)((char *)ptr + batch_start - mmap_start);
+ if (w->unbound_duration) {
+ w->reloc[r++].offset = batch_start + 2 * sizeof(uint32_t);
+ batch_start += 4 * sizeof(uint32_t);
+
+ *cs++ = w->preempt_us ? 0x5 << 23 /* MI_ARB_CHK; */ : MI_NOOP;
+ w->recursive_bb_start = cs;
+ *cs++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
if (flags & SEQNO) {
- w->reloc[0].offset = batch_start + sizeof(uint32_t);
+ w->reloc[r++].offset = batch_start + sizeof(uint32_t);
batch_start += 4 * sizeof(uint32_t);
*cs++ = MI_STORE_DWORD_IMM;
@@ -791,7 +1163,7 @@ terminate_bb(struct w_step *w, unsigned int flags)
}
if (flags & RT) {
- w->reloc[1].offset = batch_start + sizeof(uint32_t);
+ w->reloc[r++].offset = batch_start + sizeof(uint32_t);
batch_start += 4 * sizeof(uint32_t);
*cs++ = MI_STORE_DWORD_IMM;
@@ -801,7 +1173,7 @@ terminate_bb(struct w_step *w, unsigned int flags)
w->rt0_value = cs;
*cs++ = 0;
- w->reloc[2].offset = batch_start + 2 * sizeof(uint32_t);
+ w->reloc[r++].offset = batch_start + 2 * sizeof(uint32_t);
batch_start += 4 * sizeof(uint32_t);
*cs++ = 0x24 << 23 | 2; /* MI_STORE_REG_MEM */
@@ -810,7 +1182,7 @@ terminate_bb(struct w_step *w, unsigned int flags)
*cs++ = 0;
*cs++ = 0;
- w->reloc[3].offset = batch_start + sizeof(uint32_t);
+ w->reloc[r++].offset = batch_start + sizeof(uint32_t);
batch_start += 4 * sizeof(uint32_t);
*cs++ = MI_STORE_DWORD_IMM;
@@ -822,9 +1194,12 @@ terminate_bb(struct w_step *w, unsigned int flags)
}
*cs = bbe;
+
+ return r;
}
static const unsigned int eb_engine_map[NUM_ENGINES] = {
+ [DEFAULT] = I915_EXEC_DEFAULT,
[RCS] = I915_EXEC_RENDER,
[BCS] = I915_EXEC_BLT,
[VCS] = I915_EXEC_BSD,
@@ -841,21 +1216,49 @@ eb_set_engine(struct drm_i915_gem_execbuffer2 *eb,
if (engine == VCS2 && (flags & VCS2REMAP))
engine = BCS;
- eb->flags = eb_engine_map[engine];
+ if ((flags & I915) && engine == VCS)
+ eb->flags = 0;
+ else
+ eb->flags = eb_engine_map[engine];
+}
+
+static unsigned int
+find_engine_in_map(struct ctx *ctx, enum intel_engine_id engine)
+{
+ unsigned int i;
+
+ for (i = 0; i < ctx->engine_map_count; i++) {
+ if (ctx->engine_map[i] == engine)
+ return i + 1;
+ }
+
+ igt_assert(ctx->wants_balance);
+ return 0;
+}
+
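+/* Each workload context occupies two ctx_list slots: even slots hold the
+ * primary context, odd slots an optional secondary context used for
+ * i915-balanced VCS submissions. */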
+static struct ctx *
+__get_ctx(struct workload *wrk, struct w_step *w)
+{
+ return &wrk->ctx_list[w->context * 2];
}
static void
-eb_update_flags(struct w_step *w, enum intel_engine_id engine,
- unsigned int flags)
+eb_update_flags(struct workload *wrk, struct w_step *w,
+ enum intel_engine_id engine, unsigned int flags)
{
- eb_set_engine(&w->eb, engine, flags);
+ struct ctx *ctx = __get_ctx(wrk, w);
+
+ if (ctx->engine_map)
+ w->eb.flags = find_engine_in_map(ctx, engine);
+ else
+ eb_set_engine(&w->eb, engine, flags);
w->eb.flags |= I915_EXEC_HANDLE_LUT;
w->eb.flags |= I915_EXEC_NO_RELOC;
igt_assert(w->emit_fence <= 0);
if (w->emit_fence)
- w->eb.flags |= LOCAL_I915_EXEC_FENCE_OUT;
+ w->eb.flags |= I915_EXEC_FENCE_OUT;
}
static struct drm_i915_gem_exec_object2 *
@@ -867,6 +1270,17 @@ get_status_objects(struct workload *wrk)
return wrk->status_object;
}
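+
+/* Balanced VCS steps from contexts which also target specific engine
+ * instances are submitted with the secondary (odd slot) context. */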
+static uint32_t
+get_ctxid(struct workload *wrk, struct w_step *w)
+{
+ struct ctx *ctx = __get_ctx(wrk, w);
+
+ if (ctx->targets_instance && ctx->wants_balance && w->engine == VCS)
+ return wrk->ctx_list[w->context * 2 + 1].id;
+ else
+ return wrk->ctx_list[w->context * 2].id;
+}
+
static void
alloc_step_batch(struct workload *wrk, struct w_step *w, unsigned int flags)
{
@@ -902,54 +1316,168 @@ alloc_step_batch(struct workload *wrk, struct w_step *w, unsigned int flags)
}
}
- w->bb_sz = get_bb_sz(w->duration.max);
- w->bb_handle = w->obj[j].handle = gem_create(fd, w->bb_sz);
+ if (w->unbound_duration)
+ /* nops + MI_ARB_CHK + MI_BATCH_BUFFER_START */
+ w->bb_sz = max(PAGE_SIZE, get_bb_sz(w->preempt_us)) +
+ (1 + 3) * sizeof(uint32_t);
+ else
+ w->bb_sz = get_bb_sz(w->duration.max);
+ w->bb_handle = w->obj[j].handle = gem_create(fd, w->bb_sz + (w->unbound_duration ? 4096 : 0));
init_bb(w, flags);
- terminate_bb(w, flags);
+ w->obj[j].relocation_count = terminate_bb(w, flags);
- if (flags & SEQNO) {
+ if (w->obj[j].relocation_count) {
w->obj[j].relocs_ptr = to_user_pointer(&w->reloc);
- if (flags & RT)
- w->obj[j].relocation_count = 4;
- else
- w->obj[j].relocation_count = 1;
for (i = 0; i < w->obj[j].relocation_count; i++)
w->reloc[i].target_handle = 1;
+ if (w->unbound_duration)
+ w->reloc[0].target_handle = j;
}
w->eb.buffers_ptr = to_user_pointer(w->obj);
w->eb.buffer_count = j + 1;
- w->eb.rsvd1 = wrk->ctx_list[w->context].id;
+ w->eb.rsvd1 = get_ctxid(wrk, w);
if (flags & SWAPVCS && engine == VCS1)
engine = VCS2;
else if (flags & SWAPVCS && engine == VCS2)
engine = VCS1;
- eb_update_flags(w, engine, flags);
+ eb_update_flags(wrk, w, engine, flags);
#ifdef DEBUG
printf("%u: %u:|", w->idx, w->eb.buffer_count);
for (i = 0; i <= j; i++)
printf("%x|", w->obj[i].handle);
printf(" %10lu flags=%llx bb=%x[%u] ctx[%u]=%u\n",
w->bb_sz, w->eb.flags, w->bb_handle, j, w->context,
- wrk->ctx_list[w->context].id);
+ get_ctxid(wrk, w));
#endif
}
-static void
+static void __ctx_set_prio(uint32_t ctx_id, unsigned int prio)
+{
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = ctx_id,
+ .param = I915_CONTEXT_PARAM_PRIORITY,
+ .value = prio,
+ };
+
+ if (prio)
+ gem_context_set_param(fd, &param);
+}
+
+static int __vm_destroy(int i915, uint32_t vm_id)
+{
+ struct drm_i915_gem_vm_control ctl = { .vm_id = vm_id };
+ int err = 0;
+
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl)) {
+ err = -errno;
+ igt_assume(err);
+ }
+
+ errno = 0;
+ return err;
+}
+
+static void vm_destroy(int i915, uint32_t vm_id)
+{
+ igt_assert_eq(__vm_destroy(i915, vm_id), 0);
+}
+
+static unsigned int
+find_engine(struct i915_engine_class_instance *ci, unsigned int count,
+ enum intel_engine_id engine)
+{
+ struct i915_engine_class_instance e = get_engine(engine);
+ unsigned int i;
+
+ for (i = 0; i < count; i++, ci++) {
+ if (!memcmp(&e, ci, sizeof(*ci)))
+ return i;
+ }
+
+ igt_assert(0);
+ return 0;
+}
+
+static struct drm_i915_gem_context_param_sseu get_device_sseu(void)
+{
+ struct drm_i915_gem_context_param param = { };
+
+ if (device_sseu.slice_mask == -1) {
+ param.param = I915_CONTEXT_PARAM_SSEU;
+ param.value = (uintptr_t)&device_sseu;
+
+ gem_context_get_param(fd, &param);
+ }
+
+ return device_sseu;
+}
+
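+/* Apply a slice mask to a context; -1 selects the full device slice mask. */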
+static uint64_t
+set_ctx_sseu(struct ctx *ctx, uint64_t slice_mask)
+{
+ struct drm_i915_gem_context_param_sseu sseu = get_device_sseu();
+ struct drm_i915_gem_context_param param = { };
+
+ if (slice_mask == -1)
+ slice_mask = device_sseu.slice_mask;
+
+ if (ctx->engine_map && ctx->wants_balance) {
+ sseu.flags = I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX;
+ sseu.engine.engine_class = I915_ENGINE_CLASS_INVALID;
+ sseu.engine.engine_instance = 0;
+ }
+
+ sseu.slice_mask = slice_mask;
+
+ param.ctx_id = ctx->id;
+ param.param = I915_CONTEXT_PARAM_SSEU;
+ param.size = sizeof(sseu);
+ param.value = (uintptr_t)&sseu;
+
+ gem_context_set_param(fd, &param);
+
+ return slice_mask;
+}
+
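+/* Sizes of the variable-length engine uAPI structs for a given engine count. */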
+static size_t sizeof_load_balance(int count)
+{
+ return offsetof(struct i915_context_engines_load_balance,
+ engines[count]);
+}
+
+static size_t sizeof_param_engines(int count)
+{
+ return offsetof(struct i915_context_param_engines,
+ engines[count]);
+}
+
+static size_t sizeof_engines_bond(int count)
+{
+ return offsetof(struct i915_context_engines_bond,
+ engines[count]);
+}
+
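+/* Allocate sz bytes on the stack and zero them. */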
+#define alloca0(sz) ({ size_t sz__ = (sz); memset(alloca(sz__), 0, sz__); })
+
+static int
prepare_workload(unsigned int id, struct workload *wrk, unsigned int flags)
{
- unsigned int ctx_vcs = 0;
+ unsigned int ctx_vcs;
int max_ctx = -1;
struct w_step *w;
- int i;
+ int i, j;
wrk->id = id;
wrk->prng = rand();
+ wrk->bb_prng = (wrk->flags & SYNCEDCLIENTS) ? master_prng : rand();
wrk->run = true;
+ ctx_vcs = 0;
if (flags & INITVCSRR)
- wrk->vcs_rr = id & 1;
+ ctx_vcs = id & 1;
+ wrk->vcs_rr = ctx_vcs;
if (flags & GLOBAL_BALANCE) {
int ret = pthread_mutex_init(&wrk->mutex, NULL);
@@ -973,45 +1501,305 @@ prepare_workload(unsigned int id, struct workload *wrk, unsigned int flags)
}
}
+ /*
+ * Pre-scan workload steps to allocate context list storage.
+ */
for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
- if ((int)w->context > max_ctx) {
- int delta = w->context + 1 - wrk->nr_ctxs;
+ int ctx = w->context * 2 + 1; /* Odd slots are special. */
+ int delta;
- wrk->nr_ctxs += delta;
- wrk->ctx_list = realloc(wrk->ctx_list,
- wrk->nr_ctxs *
- sizeof(*wrk->ctx_list));
- memset(&wrk->ctx_list[wrk->nr_ctxs - delta], 0,
- delta * sizeof(*wrk->ctx_list));
+ if (ctx <= max_ctx)
+ continue;
+
+ delta = ctx + 1 - wrk->nr_ctxs;
- max_ctx = w->context;
+ wrk->nr_ctxs += delta;
+ wrk->ctx_list = realloc(wrk->ctx_list,
+ wrk->nr_ctxs * sizeof(*wrk->ctx_list));
+ memset(&wrk->ctx_list[wrk->nr_ctxs - delta], 0,
+ delta * sizeof(*wrk->ctx_list));
+
+ max_ctx = ctx;
+ }
+
+ /*
+ * Identify if contexts target specific engine instances and if they
+ * want to be balanced.
+ *
+ * Transfer over engine map configuration from the workload step.
+ */
+ for (j = 0; j < wrk->nr_ctxs; j += 2) {
+ struct ctx *ctx = &wrk->ctx_list[j];
+
+ bool targets = false;
+ bool balance = false;
+
+ for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
+ if (w->context != (j / 2))
+ continue;
+
+ if (w->type == BATCH) {
+ if (w->engine == VCS)
+ balance = true;
+ else
+ targets = true;
+ } else if (w->type == ENGINE_MAP) {
+ ctx->engine_map = w->engine_map;
+ ctx->engine_map_count = w->engine_map_count;
+ } else if (w->type == LOAD_BALANCE) {
+ if (!ctx->engine_map) {
+ wsim_err("Load balancing needs an engine map!\n");
+ return 1;
+ }
+ ctx->wants_balance = w->load_balance;
+ } else if (w->type == BOND) {
+ if (!ctx->wants_balance) {
+ wsim_err("Engine bonds need load balancing engine map!\n");
+ return 1;
+ }
+ ctx->bond_count++;
+ ctx->bonds = realloc(ctx->bonds,
+ ctx->bond_count *
+ sizeof(struct bond));
+ igt_assert(ctx->bonds);
+ ctx->bonds[ctx->bond_count - 1].mask =
+ w->bond_mask;
+ ctx->bonds[ctx->bond_count - 1].master =
+ w->bond_master;
+ }
}
- if (!wrk->ctx_list[w->context].id) {
- struct drm_i915_gem_context_create arg = {};
+ wrk->ctx_list[j].targets_instance = targets;
+ if (flags & I915)
+ wrk->ctx_list[j].wants_balance |= balance;
+ }
- drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg);
- igt_assert(arg.ctx_id);
+ /*
+ * Ensure VCS is not allowed with engine map contexts.
+ */
+ for (j = 0; j < wrk->nr_ctxs; j += 2) {
+ for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
+ if (w->context != (j / 2))
+ continue;
- wrk->ctx_list[w->context].id = arg.ctx_id;
+ if (w->type != BATCH)
+ continue;
- if (flags & GLOBAL_BALANCE) {
- wrk->ctx_list[w->context].static_vcs = context_vcs_rr;
- context_vcs_rr ^= 1;
- } else {
- wrk->ctx_list[w->context].static_vcs = ctx_vcs;
- ctx_vcs ^= 1;
+ if (wrk->ctx_list[j].engine_map &&
+ !wrk->ctx_list[j].wants_balance &&
+ (w->engine == VCS || w->engine == DEFAULT)) {
+ wsim_err("Batches targeting engine maps must use explicit engines!\n");
+ return -1;
}
+ }
+ }
- if (wrk->prio) {
+
+ /*
+ * Create and configure contexts.
+ */
+ for (i = 0; i < wrk->nr_ctxs; i += 2) {
+ struct ctx *ctx = &wrk->ctx_list[i];
+ uint32_t ctx_id, share_vm = 0;
+
+ if (ctx->id)
+ continue;
+
+ if ((flags & I915) || ctx->engine_map) {
+ struct drm_i915_gem_context_create_ext_setparam ext = {
+ .base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ .param.param = I915_CONTEXT_PARAM_VM,
+ };
+ struct drm_i915_gem_context_create_ext args = { };
+
+ /* Find existing context to share ppgtt with. */
+ for (j = 0; j < wrk->nr_ctxs; j++) {
struct drm_i915_gem_context_param param = {
- .ctx_id = arg.ctx_id,
- .param = I915_CONTEXT_PARAM_PRIORITY,
- .value = wrk->prio,
+ .param = I915_CONTEXT_PARAM_VM,
};
- gem_context_set_param(fd, &param);
+
+ if (!wrk->ctx_list[j].id)
+ continue;
+
+ param.ctx_id = wrk->ctx_list[j].id;
+
+ gem_context_get_param(fd, &param);
+ igt_assert(param.value);
+
+ share_vm = param.value;
+
+ ext.param.value = share_vm;
+ args.flags =
+ I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS;
+ args.extensions = to_user_pointer(&ext);
+ break;
}
+
+ if ((!ctx->engine_map && !ctx->targets_instance) ||
+ (ctx->engine_map && ctx->wants_balance))
+ args.flags |=
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+
+ drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT,
+ &args);
+
+ ctx_id = args.ctx_id;
+ } else {
+ struct drm_i915_gem_context_create args = {};
+
+ drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &args);
+ ctx_id = args.ctx_id;
+ }
+
+ igt_assert(ctx_id);
+ ctx->id = ctx_id;
+ ctx->sseu = device_sseu.slice_mask;
+
+ if (flags & GLOBAL_BALANCE) {
+ ctx->static_vcs = context_vcs_rr;
+ context_vcs_rr ^= 1;
+ } else {
+ ctx->static_vcs = ctx_vcs;
+ ctx_vcs ^= 1;
+ }
+
+ __ctx_set_prio(ctx_id, wrk->prio);
+
+ /*
+ * Do we need a separate context to satisfy workloads which both
+ * want to target specific engines and be balanced by i915?
+ */
+ if ((flags & I915) && ctx->wants_balance &&
+ ctx->targets_instance && !ctx->engine_map) {
+ struct drm_i915_gem_context_create_ext_setparam ext = {
+ .base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ .param.param = I915_CONTEXT_PARAM_VM,
+ .param.value = share_vm,
+ };
+ struct drm_i915_gem_context_create_ext args = {
+ .extensions = to_user_pointer(&ext),
+ .flags =
+ I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS |
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
+ };
+
+ igt_assert(share_vm);
+
+ drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT,
+ &args);
+
+ igt_assert(args.ctx_id);
+ ctx_id = args.ctx_id;
+ wrk->ctx_list[i + 1].id = args.ctx_id;
+
+ __ctx_set_prio(ctx_id, wrk->prio);
+ }
+
+ if (ctx->engine_map) {
+ struct i915_context_param_engines *set_engines =
+ alloca0(sizeof_param_engines(ctx->engine_map_count + 1));
+ struct i915_context_engines_load_balance *load_balance =
+ alloca0(sizeof_load_balance(ctx->engine_map_count));
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = ctx_id,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .size = sizeof_param_engines(ctx->engine_map_count + 1),
+ .value = to_user_pointer(set_engines),
+ };
+ struct i915_context_engines_bond *last = NULL;
+
+ if (ctx->wants_balance) {
+ set_engines->extensions =
+ to_user_pointer(load_balance);
+
+ load_balance->base.name =
+ I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
+ load_balance->num_siblings =
+ ctx->engine_map_count;
+
+ for (j = 0; j < ctx->engine_map_count; j++)
+ load_balance->engines[j] =
+ get_engine(ctx->engine_map[j]);
+ }
+
+ /* Reserve slot for virtual engine. */
+ set_engines->engines[0].engine_class =
+ I915_ENGINE_CLASS_INVALID;
+ set_engines->engines[0].engine_instance =
+ I915_ENGINE_CLASS_INVALID_NONE;
+
+ for (j = 1; j <= ctx->engine_map_count; j++)
+ set_engines->engines[j] =
+ get_engine(ctx->engine_map[j - 1]);
+
+ last = NULL;
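+ /* Chain one I915_CONTEXT_ENGINES_EXT_BOND extension per
+ * configured bond onto the load balancer. */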
+ for (j = 0; j < ctx->bond_count; j++) {
+ unsigned long mask = ctx->bonds[j].mask;
+ struct i915_context_engines_bond *bond =
+ alloca0(sizeof_engines_bond(__builtin_popcount(mask)));
+ unsigned int b, e;
+
+ bond->base.next_extension = to_user_pointer(last);
+ bond->base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+
+ bond->virtual_index = 0;
+ bond->master = get_engine(ctx->bonds[j].master);
+
+ for (b = 0, e = 0; mask; e++, mask >>= 1) {
+ unsigned int idx;
+
+ if (!(mask & 1))
+ continue;
+
+ idx = find_engine(&set_engines->engines[1],
+ ctx->engine_map_count,
+ e);
+ bond->engines[b++] =
+ set_engines->engines[1 + idx];
+ }
+
+ last = bond;
+ }
+ load_balance->base.next_extension = to_user_pointer(last);
+
+ gem_context_set_param(fd, &param);
+ } else if (ctx->wants_balance) {
+ const unsigned int count = num_engines_in_class(VCS);
+ struct i915_context_engines_load_balance *load_balance =
+ alloca0(sizeof_load_balance(count));
+ struct i915_context_param_engines *set_engines =
+ alloca0(sizeof_param_engines(count + 1));
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = ctx_id,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .size = sizeof_param_engines(count + 1),
+ .value = to_user_pointer(set_engines),
+ };
+
+ set_engines->extensions = to_user_pointer(load_balance);
+
+ set_engines->engines[0].engine_class =
+ I915_ENGINE_CLASS_INVALID;
+ set_engines->engines[0].engine_instance =
+ I915_ENGINE_CLASS_INVALID_NONE;
+ fill_engines_class(&set_engines->engines[1], VCS);
+
+ load_balance->base.name =
+ I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
+ load_balance->num_siblings = count;
+
+ fill_engines_class(&load_balance->engines[0], VCS);
+
+ gem_context_set_param(fd, &param);
+ }
+
+ if (wrk->sseu) {
+ /* Set to slice 0 only, one slice. */
+ ctx->sseu = set_ctx_sseu(ctx, 1);
}
+
+ if (share_vm)
+ vm_destroy(fd, share_vm);
}
/* Record default preemption. */
@@ -1027,7 +1815,6 @@ prepare_workload(unsigned int id, struct workload *wrk, unsigned int flags)
*/
for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
struct w_step *w2;
- int j;
if (w->type != PREEMPTION)
continue;
@@ -1047,6 +1834,16 @@ prepare_workload(unsigned int id, struct workload *wrk, unsigned int flags)
}
/*
+ * Scan for SSEU control steps.
+ */
+ for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
+ if (w->type == SSEU) {
+ get_device_sseu();
+ break;
+ }
+ }
+
+ /*
* Allocate batch buffers.
*/
for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
@@ -1061,6 +1858,8 @@ prepare_workload(unsigned int id, struct workload *wrk, unsigned int flags)
alloc_step_batch(wrk, w, _flags);
}
+
+ return 0;
}
static double elapsed(const struct timespec *start, const struct timespec *end)
@@ -1385,7 +2184,7 @@ static enum intel_engine_id
context_balance(const struct workload_balancer *balancer,
struct workload *wrk, struct w_step *w)
{
- return get_vcs_engine(wrk->ctx_list[w->context].static_vcs);
+ return get_vcs_engine(__get_ctx(wrk, w)->static_vcs);
}
static unsigned int
@@ -1579,6 +2378,12 @@ static const struct workload_balancer all_balancers[] = {
.get_qd = get_engine_busy,
.balance = busy_avg_balance,
},
+ {
+ .id = 11,
+ .name = "i915",
+ .desc = "i915 balancing.",
+ .flags = I915,
+ },
};
static unsigned int
@@ -1664,6 +2469,18 @@ update_bb_rt(struct w_step *w, enum intel_engine_id engine, uint32_t seqno)
}
}
+static void
+update_bb_start(struct w_step *w)
+{
+ if (!w->unbound_duration)
+ return;
+
+ gem_set_domain(fd, w->bb_handle,
+ I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
+
+ *w->recursive_bb_start = MI_BATCH_BUFFER_START | (1 << 8) | 1;
+}
+
static void w_sync_to(struct workload *wrk, struct w_step *w, int target)
{
if (target < 0)
@@ -1792,16 +2609,20 @@ do_eb(struct workload *wrk, struct w_step *w, enum intel_engine_id engine,
uint32_t seqno = new_seqno(wrk, engine);
unsigned int i;
- eb_update_flags(w, engine, flags);
+ eb_update_flags(wrk, w, engine, flags);
if (flags & SEQNO)
update_bb_seqno(w, engine, seqno);
if (flags & RT)
update_bb_rt(w, engine, seqno);
+ update_bb_start(w);
+
w->eb.batch_start_offset =
- ALIGN(w->bb_sz - get_bb_sz(get_duration(w)),
- 2 * sizeof(uint32_t));
+ w->unbound_duration ?
+ 0 :
+ ALIGN(w->bb_sz - get_bb_sz(get_duration(wrk, w)),
+ 2 * sizeof(uint32_t));
for (i = 0; i < w->fence_deps.nr; i++) {
int tgt = w->idx + w->fence_deps.list[i];
@@ -1811,16 +2632,20 @@ do_eb(struct workload *wrk, struct w_step *w, enum intel_engine_id engine,
igt_assert(tgt >= 0 && tgt < w->idx);
igt_assert(wrk->steps[tgt].emit_fence > 0);
- w->eb.flags |= LOCAL_I915_EXEC_FENCE_IN;
+ if (w->fence_deps.submit_fence)
+ w->eb.flags |= I915_EXEC_FENCE_SUBMIT;
+ else
+ w->eb.flags |= I915_EXEC_FENCE_IN;
+
w->eb.rsvd2 = wrk->steps[tgt].emit_fence;
}
- if (w->eb.flags & LOCAL_I915_EXEC_FENCE_OUT)
+ if (w->eb.flags & I915_EXEC_FENCE_OUT)
gem_execbuf_wr(fd, &w->eb);
else
gem_execbuf(fd, &w->eb);
- if (w->eb.flags & LOCAL_I915_EXEC_FENCE_OUT) {
+ if (w->eb.flags & I915_EXEC_FENCE_OUT) {
w->emit_fence = w->eb.rsvd2 >> 32;
igt_assert(w->emit_fence > 0);
}
@@ -1865,9 +2690,6 @@ static void *run_workload(void *data)
clock_gettime(CLOCK_MONOTONIC, &t_start);
- hars_petruska_f54_1_random_seed((wrk->flags & SYNCEDCLIENTS) ?
- 0 : wrk->id);
-
init_status_page(wrk, INIT_ALL);
for (count = 0; wrk->run && (wrk->background || count < wrk->repeat);
count++) {
@@ -1937,7 +2759,28 @@ static void *run_workload(void *data)
w->priority;
}
continue;
- } else if (w->type == PREEMPTION) {
+ } else if (w->type == TERMINATE) {
+ unsigned int t_idx = i + w->target;
+
+ igt_assert(t_idx >= 0 && t_idx < i);
+ igt_assert(wrk->steps[t_idx].type == BATCH);
+ igt_assert(wrk->steps[t_idx].unbound_duration);
+
+ *wrk->steps[t_idx].recursive_bb_start =
+ MI_BATCH_BUFFER_END;
+ __sync_synchronize();
+ continue;
+ } else if (w->type == PREEMPTION ||
+ w->type == ENGINE_MAP ||
+ w->type == LOAD_BALANCE ||
+ w->type == BOND) {
+ continue;
+ } else if (w->type == SSEU) {
+ if (w->sseu != wrk->ctx_list[w->context * 2].sseu) {
+ wrk->ctx_list[w->context * 2].sseu =
+ set_ctx_sseu(&wrk->ctx_list[w->context * 2],
+ w->sseu);
+ }
continue;
}
@@ -1957,7 +2800,8 @@ static void *run_workload(void *data)
last_sync = false;
wrk->nr_bb[engine]++;
- if (engine == VCS && wrk->balancer) {
+ if (engine == VCS && wrk->balancer &&
+ wrk->balancer->balance) {
engine = wrk->balancer->balance(wrk->balancer,
wrk, w);
wrk->nr_bb[engine]++;
@@ -2112,6 +2956,7 @@ static void print_help(void)
" -t <n> Nop calibration tolerance percentage.\n"
" Use when there is a difficulty obtaining calibration with the\n"
" default settings.\n"
+" -I <n> Initial randomness seed.\n"
" -p <n> Context priority to use for the following workload on the\n"
" command line.\n"
" -w <desc|path> Filename or a workload descriptor.\n"
@@ -2142,6 +2987,8 @@ static void print_help(void)
" -R Round-robin initial VCS assignment per client.\n"
" -H Send heartbeat on synchronisation points with seqno based\n"
" balancers. Gives better engine busyness view in some cases.\n"
+" -s Turn on small SSEU config for the next workload on the\n"
+" command line. Subsequent -s switches it off.\n"
" -S Synchronize the sequence of random batch durations between\n"
" clients.\n"
" -G Global load balancing - a single load balancer will be shared\n"
@@ -2184,11 +3031,12 @@ static char *load_workload_descriptor(char *filename)
}
static struct w_arg *
-add_workload_arg(struct w_arg *w_args, unsigned int nr_args, char *w_arg, int prio)
+add_workload_arg(struct w_arg *w_args, unsigned int nr_args, char *w_arg,
+ int prio, bool sseu)
{
w_args = realloc(w_args, sizeof(*w_args) * nr_args);
igt_assert(w_args);
- w_args[nr_args - 1] = (struct w_arg) { w_arg, NULL, prio };
+ w_args[nr_args - 1] = (struct w_arg) { w_arg, NULL, prio, sseu };
return w_args;
}
@@ -2281,28 +3129,28 @@ int main(int argc, char **argv)
init_clocks();
- while ((c = getopt(argc, argv, "hqv2RSHxGdc:n:r:w:W:a:t:b:p:")) != -1) {
+ master_prng = time(NULL);
+
+ while ((c = getopt(argc, argv,
+ "hqv2RsSHxGdc:n:r:w:W:a:t:b:p:I:")) != -1) {
switch (c) {
case 'W':
if (master_workload >= 0) {
- if (verbose)
- fprintf(stderr,
- "Only one master workload can be given!\n");
+ wsim_err("Only one master workload can be given!\n");
return 1;
}
master_workload = nr_w_args;
/* Fall through */
case 'w':
- w_args = add_workload_arg(w_args, ++nr_w_args, optarg, prio);
+ w_args = add_workload_arg(w_args, ++nr_w_args, optarg,
+ prio, flags & SSEU);
break;
case 'p':
prio = atoi(optarg);
break;
case 'a':
if (append_workload_arg) {
- if (verbose)
- fprintf(stderr,
- "Only one append workload can be given!\n");
+ wsim_err("Only one append workload can be given!\n");
return 1;
}
append_workload_arg = optarg;
@@ -2337,6 +3185,9 @@ int main(int argc, char **argv)
case 'S':
flags |= SYNCEDCLIENTS;
break;
+ case 's':
+ flags ^= SSEU;
+ break;
case 'H':
flags |= HEARTBEAT;
break;
@@ -2363,13 +3214,14 @@ int main(int argc, char **argv)
}
if (!balancer) {
- if (verbose)
- fprintf(stderr,
- "Unknown balancing mode '%s'!\n",
- optarg);
+ wsim_err("Unknown balancing mode '%s'!\n",
+ optarg);
return 1;
}
break;
+ case 'I':
+ master_prng = strtol(optarg, NULL, 0);
+ break;
case 'h':
print_help();
return 0;
@@ -2379,8 +3231,12 @@ int main(int argc, char **argv)
}
if ((flags & HEARTBEAT) && !(flags & SEQNO)) {
- if (verbose)
- fprintf(stderr, "Heartbeat needs a seqno based balancer!\n");
+ wsim_err("Heartbeat needs a seqno based balancer!\n");
+ return 1;
+ }
+
+ if ((flags & VCS2REMAP) && (flags & I915)) {
+ wsim_err("VCS remapping not supported with i915 balancing!\n");
return 1;
}
@@ -2397,31 +3253,24 @@ int main(int argc, char **argv)
}
if (!nr_w_args) {
- if (verbose)
- fprintf(stderr, "No workload descriptor(s)!\n");
+ wsim_err("No workload descriptor(s)!\n");
return 1;
}
if (nr_w_args > 1 && clients > 1) {
- if (verbose)
- fprintf(stderr,
- "Cloned clients cannot be combined with multiple workloads!\n");
+ wsim_err("Cloned clients cannot be combined with multiple workloads!\n");
return 1;
}
if ((flags & GLOBAL_BALANCE) && !balancer) {
- if (verbose)
- fprintf(stderr,
- "Balancer not specified in global balancing mode!\n");
+ wsim_err("Balancer not specified in global balancing mode!\n");
return 1;
}
if (append_workload_arg) {
append_workload_arg = load_workload_descriptor(append_workload_arg);
if (!append_workload_arg) {
- if (verbose)
- fprintf(stderr,
- "Failed to load append workload descriptor!\n");
+ wsim_err("Failed to load append workload descriptor!\n");
return 1;
}
}
@@ -2430,9 +3279,7 @@ int main(int argc, char **argv)
struct w_arg arg = { NULL, append_workload_arg, 0 };
app_w = parse_workload(&arg, flags, NULL);
if (!app_w) {
- if (verbose)
- fprintf(stderr,
- "Failed to parse append workload!\n");
+ wsim_err("Failed to parse append workload!\n");
return 1;
}
}
@@ -2444,18 +3291,13 @@ int main(int argc, char **argv)
w_args[i].desc = load_workload_descriptor(w_args[i].filename);
if (!w_args[i].desc) {
- if (verbose)
- fprintf(stderr,
- "Failed to load workload descriptor %u!\n",
- i);
+ wsim_err("Failed to load workload descriptor %u!\n", i);
return 1;
}
wrk[i] = parse_workload(&w_args[i], flags, app_w);
if (!wrk[i]) {
- if (verbose)
- fprintf(stderr,
- "Failed to parse workload %u!\n", i);
+ wsim_err("Failed to parse workload %u!\n", i);
return 1;
}
}
@@ -2464,18 +3306,28 @@ int main(int argc, char **argv)
clients = nr_w_args;
if (verbose > 1) {
+ printf("Random seed is %u.\n", master_prng);
printf("Using %lu nop calibration for %uus delay.\n",
nop_calibration, nop_calibration_us);
printf("%u client%s.\n", clients, clients > 1 ? "s" : "");
if (flags & SWAPVCS)
printf("Swapping VCS rings between clients.\n");
- if (flags & GLOBAL_BALANCE)
- printf("Using %s balancer in global mode.\n",
- balancer->name);
- else if (balancer)
+ if (flags & GLOBAL_BALANCE) {
+ if (flags & I915) {
+ printf("Ignoring global balancing with i915!\n");
+ flags &= ~GLOBAL_BALANCE;
+ } else {
+ printf("Using %s balancer in global mode.\n",
+ balancer->name);
+ }
+ } else if (balancer) {
printf("Using %s balancer.\n", balancer->name);
+ }
}
+ srand(master_prng);
+ master_prng = rand();
+
if (master_workload >= 0 && clients == 1)
master_workload = -1;
@@ -2490,7 +3342,7 @@ int main(int argc, char **argv)
if (flags & SWAPVCS && i & 1)
flags_ &= ~SWAPVCS;
- if (flags & GLOBAL_BALANCE) {
+ if ((flags & GLOBAL_BALANCE) && !(flags & I915)) {
w[i]->balancer = &global_balancer;
w[i]->global_wrk = w[0];
w[i]->global_balancer = balancer;
@@ -2504,15 +3356,17 @@ int main(int argc, char **argv)
w[i]->print_stats = verbose > 1 ||
(verbose > 0 && master_workload == i);
- prepare_workload(i, w[i], flags_);
+ if (prepare_workload(i, w[i], flags_)) {
+ wsim_err("Failed to prepare workload %u!\n", i);
+ return 1;
+ }
+
if (balancer && balancer->init) {
int ret = balancer->init(balancer, w[i]);
if (ret) {
- if (verbose)
- fprintf(stderr,
- "Failed to initialize balancing! (%u=%d)\n",
- i, ret);
+ wsim_err("Failed to initialize balancing! (%u=%d)\n",
+ i, ret);
return 1;
}
}
diff --git a/benchmarks/wsim/README b/benchmarks/wsim/README
index 205cd6c9..9f770217 100644
--- a/benchmarks/wsim/README
+++ b/benchmarks/wsim/README
@@ -2,9 +2,12 @@ Workload descriptor format
==========================
ctx.engine.duration_us.dependency.wait,...
-<uint>.<str>.<uint>[-<uint>].<int <= 0>[/<int <= 0>][...].<0|1>,...
-P|X.<uint>.<int>
-d|p|s|t|q|a.<int>,...
+<uint>.<str>.<uint>[-<uint>]|*.<int <= 0>[/<int <= 0>][...].<0|1>,...
+B.<uint>
+M.<uint>.<str>[|<str>]...
+P|S|X.<uint>.<int>
+d|p|s|t|q|a|T.<int>,...
+b.<uint>.<str>[|<str>].<str>
f
For duration a range can be given from which a random value will be picked
@@ -23,10 +26,15 @@ Additional workload steps are also supported:
'q' - Throttle to n max queue depth.
'f' - Create a sync fence.
'a' - Advance the previously created sync fence.
+ 'B' - Turn on context load balancing.
+ 'b' - Set up engine bonds.
+ 'M' - Set up engine map.
'P' - Context priority.
+ 'S' - Context SSEU configuration.
+ 'T' - Terminate an infinite batch.
'X' - Context preemption control.
-Engine ids: RCS, BCS, VCS, VCS1, VCS2, VECS
+Engine ids: DEFAULT, RCS, BCS, VCS, VCS1, VCS2, VECS
Example (leading spaces must not be present in the actual file):
----------------------------------------------------------------
@@ -71,6 +79,10 @@ Example:
In this case the last step has a data dependency on both the first and second steps.
+Batch durations can also be specified as infinite by using the '*' in the
+duration field. Such batches must be ended by the terminate command ('T')
+otherwise they will cause a GPU hang to be reported.
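+
+For example (a minimal sketch; the 'T' step references the infinite batch one
+step back):
+
+ 1.RCS.*.0.0
+ T.-1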
+
Sync (fd) fences
----------------
@@ -114,6 +126,23 @@ runnable. When the second RCS batch completes the standalone fence is signaled
which allows the two VCS batches to be executed. Finally we wait until the both
VCS batches have completed before starting the (optional) next iteration.
+Submit fences
+-------------
+
+Submit fences are a type of input fence which is signalled when the originating
+batch buffer is submitted to the GPU (in contrast to normal sync fences, which
+are signalled on completion).
+
+Submit fences use the same syntax as sync fences, with a lower-case 's'
+selecting them. E.g.:
+
+ 1.RCS.500-1000.0.0
+ 1.VCS1.3000.s-1.0
+ 1.VCS2.3000.s-2.0
+
+Here the VCS1 and VCS2 batches will only be submitted for execution once the
+RCS batch has entered the GPU.
+
Context priority
----------------
@@ -144,3 +173,105 @@ The same context is then marked to have batches which can be preempted every
Same as with context priority, context preemption commands are valid until
optionally overridden by another preemption control change on the same context.
+
+Engine maps
+-----------
+
+Engine maps are a per-context feature which changes the way engine selection is
+done in the driver.
+
+Example:
+
+ M.1.VCS1|VCS2
+
+This sets up context 1 with an engine map containing the VCS1 and VCS2 engines.
+Submission to this context can now only reference these two engines.
+
+Engine maps can also be defined using an engine class name, such as VCS.
+
+Example:
+
+M.1.VCS
+
+This sets up the engine map to all available VCS class engines.
+
+Context load balancing
+----------------------
+
+Context load balancing (aka Virtual Engine) is an i915 feature where the driver
+picks the best (most idle) engine to submit to, given the previously configured
+engine map.
+
+Example:
+
+ B.1
+
+This enables load balancing for context number one.
+
+Engine bonds
+------------
+
+Engine bonds are extensions to load balanced contexts. They allow expressing
+engine selection rules between two co-operating contexts tied together with
+submit fences. In other words, a rule tells the driver: "If you pick this
+engine for context one, then you have to pick that engine for context two".
+
+Syntax is:
+ b.<context>.<engine_list>.<master_engine>
+
+The engine list is one or more sibling engines separated by a pipe character
+(eg. "VCS1|VCS2").
+
+There can be multiple bonds tied to the same context.
+
+Example:
+
+ M.1.RCS|VECS
+ B.1
+ M.2.VCS1|VCS2
+ B.2
+ b.2.VCS1.RCS
+ b.2.VCS2.VECS
+
+This tells the driver that if it picked RCS for context one, it has to pick VCS1
+for context two. And if it picked VECS for context one, it has to pick VCS2 for
+context two.
+
+If we extend the above example with more workload directives:
+
+ 1.DEFAULT.1000.0.0
+ 2.DEFAULT.1000.s-1.0
+
+We get to a fully functional example where two batch buffers are submitted in a
+load balanced fashion, telling the driver they should run simultaneously and
+that valid engine pairs are either RCS + VCS1 (for the two contexts respectively),
+or VECS + VCS2.
+
+This can also be extended using sync fences to reduce the chance of the first
+submission reaching the hardware after the second one. The second block would
+then look like:
+
+ f
+ 1.DEFAULT.1000.f-1.0
+ 2.DEFAULT.1000.s-1.0
+ a.-3
+
+Context SSEU configuration
+--------------------------
+
+ S.1.1
+ 1.RCS.1000.0.0
+ S.2.-1
+ 2.RCS.1000.0.0
+
+Context 1 is configured to run with one enabled slice (slice mask 1) and a batch
+is submitted against it. Context 2 is configured to run with all slices (this is
+the default so the command could also be omitted) and a batch submitted against
+it.
+
+This shows the dynamic SSEU reconfiguration cost between two contexts competing
+for the render engine.
+
+A slice mask of -1 has the special meaning of "all slices". Otherwise any
+integer can be specified as the slice mask, but beware that values other than
+1 and -1 can make the workload not portable between different GPUs.
diff --git a/benchmarks/wsim/frame-split-60fps.wsim b/benchmarks/wsim/frame-split-60fps.wsim
new file mode 100644
index 00000000..17490ddf
--- /dev/null
+++ b/benchmarks/wsim/frame-split-60fps.wsim
@@ -0,0 +1,18 @@
+X.1.0
+M.1.VCS1
+B.1
+X.2.0
+M.2.VCS2
+B.2
+b.2.VCS2.VCS1
+f
+1.DEFAULT.*.f-1.0
+2.DEFAULT.4000-6000.s-1.0
+a.-3
+s.-2
+T.-4
+3.RCS.2000-4000.-5/-4.0
+3.VECS.2000.-1.0
+4.BCS.1000.-1.0
+s.-2
+p.16667
diff --git a/benchmarks/wsim/high-composited-game.wsim b/benchmarks/wsim/high-composited-game.wsim
new file mode 100644
index 00000000..a90a2b2b
--- /dev/null
+++ b/benchmarks/wsim/high-composited-game.wsim
@@ -0,0 +1,11 @@
+1.RCS.500.0.0
+1.RCS.2000.0.0
+1.RCS.2000.0.0
+1.RCS.2000.0.0
+1.RCS.2000.0.0
+1.RCS.2000.0.0
+1.RCS.2000.0.0
+P.2.1
+2.BCS.1000.-2.0
+2.RCS.2000.-1.1
+p.16667
diff --git a/benchmarks/wsim/media-1080p-player.wsim b/benchmarks/wsim/media-1080p-player.wsim
new file mode 100644
index 00000000..bcbb0cfd
--- /dev/null
+++ b/benchmarks/wsim/media-1080p-player.wsim
@@ -0,0 +1,5 @@
+1.VCS.5000-10000.0.0
+2.RCS.1000-2000.-1.0
+P.3.1
+3.BCS.1000.-2.0
+p.16667
diff --git a/benchmarks/wsim/medium-composited-game.wsim b/benchmarks/wsim/medium-composited-game.wsim
new file mode 100644
index 00000000..58088351
--- /dev/null
+++ b/benchmarks/wsim/medium-composited-game.wsim
@@ -0,0 +1,9 @@
+1.RCS.1000-2000.0.0
+1.RCS.1000-2000.0.0
+1.RCS.1000-2000.0.0
+1.RCS.1000-2000.0.0
+1.RCS.1000-2000.0.0
+P.2.1
+2.BCS.1000.-2.0
+2.RCS.2000.-1.1
+p.16667
diff --git a/configure.ac b/configure.ac
index 7467e620..f9e4942e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -22,7 +22,7 @@
AC_PREREQ([2.60])
AC_INIT([igt-gpu-tools],
- [1.23],
+ [1.24],
[https://bugs.freedesktop.org/enter_bug.cgi?product=DRI&component=IGT],
[igt-gpu-tools])
@@ -122,6 +122,20 @@ case "$target_cpu" in
;;
esac
+if test x"$build_x86" = xyes; then
+ PKG_CHECK_MODULES(OVERLAY_XVLIB, [xv x11 xext dri2proto >= 2.6], enable_overlay_xvlib=yes, enable_overlay_xvlib=no)
+ PKG_CHECK_MODULES(OVERLAY_XLIB, [cairo-xlib dri2proto >= 2.6], enable_overlay_xlib=yes, enable_overlay_xlib=no)
+
+ AC_CHECK_TOOL([LEG], [leg])
+ if test x"$LEG" != xleg; then
+ enable_overlay_xvlib="no"
+ enable_overlay_xlib="no"
+ AC_MSG_NOTICE([Failed to find leg, required for overlay, try : apt-get install peg])
+ fi
+else
+ enable_overlay_xvlib="no"
+ enable_overlay_xlib="no"
+fi
AM_CONDITIONAL(BUILD_X86, [test "x$build_x86" = xyes])
AM_CONDITIONAL(BUILD_OVERLAY_XVLIB, [test "x$enable_overlay_xvlib" = xyes])
diff --git a/docs/chamelium.txt b/docs/chamelium.txt
index 5cc85d6e..aaa0646e 100644
--- a/docs/chamelium.txt
+++ b/docs/chamelium.txt
@@ -42,7 +42,17 @@ section. At this point, IGT has to be configured to connect to the Chamelium.
It may be necessary to give the Chamelium a static IP address, depending on
the network setup. This can be configured (via the serial console) by editing
-the Debian-styled /etc/network/interfaces configuration file.
+the Debian-styled /etc/network/interfaces configuration file. Example setup:
+
+ sudo screen /dev/ttyUSB0 115200
+ sudo vi /etc/network/interfaces
+
+and then configure eth0 like so:
+
+ iface eth0 inet static
+ address 192.168.1.2
+ netmask 255.255.255.0
+ gateway 192.168.1.1
This document assumes that the target PC's network IP is "192.168.1.1/24" and
the Chamelium's network IP is "192.168.1.2/24".
@@ -117,6 +127,16 @@ $ ./scripts/run-tests.sh -t chamelium
Debugging the Chamelium
-----------------------
+It is possible to manually send Chamelium RPC calls with the xmlrpc utility
+(from xmlrpc-c). For instance, to plug the DisplayPort port:
+$ xmlrpc http://192.168.1.2:9992 Plug i/1
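+
+To unplug it again (assuming the DisplayPort connector is still mapped to port
+id 1 on the particular Chamelium board):
+$ xmlrpc http://192.168.1.2:9992 Unplug i/1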
+
+The xmlrpc utility documentation is available at:
+http://xmlrpc-c.sourceforge.net/doc/xmlrpc.html
+
+The XML-RPC Chamelium interface is described here:
+https://chromium.googlesource.com/chromiumos/platform/chameleon/+/refs/heads/master/chameleond/interface.py
+
Logs that may be useful for debugging can be obtained either by connecting to
the board via SSH or serial console and looking at the daemon logs from
/var/log, such as:
diff --git a/docs/reference/igt-gpu-tools/meson.build b/docs/reference/igt-gpu-tools/meson.build
index b2b2c1c2..4d177e49 100644
--- a/docs/reference/igt-gpu-tools/meson.build
+++ b/docs/reference/igt-gpu-tools/meson.build
@@ -54,37 +54,18 @@ configure_file(input: 'version.xml.in',
output: 'version.xml',
install: false, configuration: config)
-if meson.version().version_compare('>= 0.47')
- foreach group : test_groups
- programs_xml = 'igt_test_programs_' + group + '_programs.xml'
- generated_docs += custom_target(programs_xml,
- output : programs_xml,
- command : [ gen_programs, '@OUTPUT@', group, test_list_target ])
+foreach group : test_groups
+ programs_xml = 'igt_test_programs_' + group + '_programs.xml'
+ generated_docs += custom_target(programs_xml,
+ output : programs_xml,
+ command : [ gen_programs, '@OUTPUT@', group, test_list_target ])
- description_xml = 'igt_test_programs_' + group + '_description.xml'
- generated_docs += custom_target(description_xml,
- output : description_xml,
- depends : test_executables,
- command : [ gen_description, '@OUTPUT@', group, test_list_target ])
- endforeach
-else
- # older meson needs the build_by_default hack because gtkdoc dependency
- # handling is broken
- foreach group : test_groups
- programs_xml = 'igt_test_programs_' + group + '_programs.xml'
- custom_target(programs_xml,
- build_by_default : true,
- output : programs_xml,
- command : [ gen_programs, '@OUTPUT@', group, test_list_target ])
-
- description_xml = 'igt_test_programs_' + group + '_description.xml'
- custom_target(description_xml,
- build_by_default : true,
- output : description_xml,
- depends : test_executables,
- command : [ gen_description, '@OUTPUT@', group, test_list_target ])
- endforeach
-endif
+ description_xml = 'igt_test_programs_' + group + '_description.xml'
+ generated_docs += custom_target(description_xml,
+ output : description_xml,
+ depends : test_executables,
+ command : [ gen_description, '@OUTPUT@', group, test_list_target ])
+endforeach
gnome.gtkdoc('igt-gpu-tools',
content_files : ['igt_test_programs.xml'] + generated_docs,
diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
index e01b3e1f..761517f1 100644
--- a/include/drm-uapi/i915_drm.h
+++ b/include/drm-uapi/i915_drm.h
@@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class {
struct i915_engine_class_instance {
__u16 engine_class; /* see enum drm_i915_gem_engine_class */
__u16 engine_instance;
+#define I915_ENGINE_CLASS_INVALID_NONE -1
+#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};
/**
@@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
+#define DRM_I915_GEM_VM_CREATE 0x3a
+#define DRM_I915_GEM_VM_DESTROY 0x3b
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -598,6 +604,12 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_MMAP_GTT_COHERENT 52
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
+ * execution through use of explicit fence support.
+ * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
+ */
+#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -1120,7 +1132,16 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_ARRAY (1<<19)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
+/*
+ * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_SUBMIT (1 << 20)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1464,8 +1485,9 @@ struct drm_i915_gem_context_create_ext {
__u32 ctx_id; /* output: id of new context*/
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
- (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+ (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
__u64 extensions;
};
@@ -1507,6 +1529,41 @@ struct drm_i915_gem_context_param {
* On creation, all new contexts are marked as recoverable.
*/
#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+
+ /*
+ * The id of the associated virtual memory address space (ppGTT) of
+ * this context. Can be retrieved and passed to another context
+ * (on the same fd) for both to use the same ppGTT and so share
+ * address layouts, and avoid reloading the page tables on context
+ * switches between themselves.
+ *
+ * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+ */
+#define I915_CONTEXT_PARAM_VM 0x9
+
+/*
+ * I915_CONTEXT_PARAM_ENGINES:
+ *
+ * Bind this context to operate on this subset of available engines. Henceforth,
+ * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
+ * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
+ * and upwards. Slots 0...N are filled in using the specified (class, instance).
+ * Use
+ * engine_class: I915_ENGINE_CLASS_INVALID,
+ * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
+ * to specify a gap in the array that can be filled in later, e.g. by a
+ * virtual engine used for load balancing.
+ *
+ * Setting the number of engines bound to the context to 0, by passing a zero
+ * sized argument, will revert back to default settings.
+ *
+ * See struct i915_context_param_engines.
+ *
+ * Extensions:
+ * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
+ * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ */
+#define I915_CONTEXT_PARAM_ENGINES 0xa
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -1540,9 +1597,10 @@ struct drm_i915_gem_context_param_sseu {
struct i915_engine_class_instance engine;
/*
- * Unused for now. Must be cleared to zero.
+ * Unknown flags must be cleared to zero.
*/
__u32 flags;
+#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
/*
* Mask of slices to enable for the context. Valid values are a subset
@@ -1570,12 +1628,115 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/*
+ * i915_context_engines_load_balance:
+ *
+ * Enable load balancing across this set of engines.
+ *
+ * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
+ * used will proxy the execbuffer request onto one of the set of engines
+ * in such a way as to distribute the load evenly across the set.
+ *
+ * The set of engines must be compatible (e.g. the same HW class) as they
+ * will share the same logical GPU context and ring.
+ *
+ * To intermix rendering with the virtual engine and direct rendering onto
+ * the backing engines (bypassing the load balancing proxy), the context must
+ * be defined to use a single timeline for all engines.
+ */
+struct i915_context_engines_load_balance {
+ struct i915_user_extension base;
+
+ __u16 engine_index;
+ __u16 num_siblings;
+ __u32 flags; /* all undefined flags must be zero */
+
+ __u64 mbz64; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 num_siblings; \
+ __u32 flags; \
+ __u64 mbz64; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/*
+ * i915_context_engines_bond:
+ *
+ * Constructed bonded pairs for execution within a virtual engine.
+ *
+ * All engines are equal, but some are more equal than others. Given
+ * the distribution of resources in the HW, it may be preferable to run
+ * a request on a given subset of engines in parallel to a request on a
+ * specific engine. We enable this selection of engines within a virtual
+ * engine by specifying bonding pairs, for any given master engine we will
+ * only execute on one of the corresponding siblings within the virtual engine.
+ *
+ * To execute a request in parallel on the master engine and a sibling requires
+ * coordination with a I915_EXEC_FENCE_SUBMIT.
+ */
+struct i915_context_engines_bond {
+ struct i915_user_extension base;
+
+ struct i915_engine_class_instance master;
+
+ __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
+ __u16 num_bonds;
+
+ __u64 flags; /* all undefined flags must be zero */
+ __u64 mbz64[4]; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
+ struct i915_user_extension base; \
+ struct i915_engine_class_instance master; \
+ __u16 virtual_index; \
+ __u16 num_bonds; \
+ __u64 flags; \
+ __u64 mbz64[4]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+struct i915_context_param_engines {
+ __u64 extensions; /* linked chain of extension blocks, 0 terminates */
+#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
+#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
+ __u64 extensions; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
struct i915_user_extension base;
struct drm_i915_gem_context_param param;
};
+struct drm_i915_gem_context_create_ext_clone {
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
+ struct i915_user_extension base;
+ __u32 clone_id;
+ __u32 flags;
+#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
+#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
+#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
+#define I915_CONTEXT_CLONE_SSEU (1u << 3)
+#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
+#define I915_CONTEXT_CLONE_VM (1u << 5)
+#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
+ __u64 rsvd;
+};
+
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
@@ -1821,6 +1982,7 @@ struct drm_i915_perf_oa_config {
struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
/* Must be kept compact -- no holes and well documented */
/*
@@ -1919,6 +2081,47 @@ struct drm_i915_query_topology_info {
__u8 data[];
};
+/**
+ * struct drm_i915_engine_info
+ *
+ * Describes one engine and its capabilities as known to the driver.
+ */
+struct drm_i915_engine_info {
+ /** Engine class and instance. */
+ struct i915_engine_class_instance engine;
+
+ /** Reserved field. */
+ __u32 rsvd0;
+
+ /** Engine flags. */
+ __u64 flags;
+
+ /** Capabilities of this engine. */
+ __u64 capabilities;
+#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
+#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+
+ /** Reserved fields. */
+ __u64 rsvd1[4];
+};
+
+/**
+ * struct drm_i915_query_engine_info
+ *
+ * Engine info query enumerates all engines known to the driver by filling in
+ * an array of struct drm_i915_engine_info structures.
+ */
+struct drm_i915_query_engine_info {
+ /** Number of struct drm_i915_engine_info structs following. */
+ __u32 num_engines;
+
+ /** MBZ */
+ __u32 rsvd[3];
+
+ /** Marker for drm_i915_engine_info structures. */
+ struct drm_i915_engine_info engines[];
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/drm-uapi/panfrost_drm.h b/include/drm-uapi/panfrost_drm.h
new file mode 100644
index 00000000..a52e0283
--- /dev/null
+++ b/include/drm-uapi/panfrost_drm.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Broadcom
+ * Copyright © 2019 Collabora ltd.
+ */
+#ifndef _PANFROST_DRM_H_
+#define _PANFROST_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_PANFROST_SUBMIT 0x00
+#define DRM_PANFROST_WAIT_BO 0x01
+#define DRM_PANFROST_CREATE_BO 0x02
+#define DRM_PANFROST_MMAP_BO 0x03
+#define DRM_PANFROST_GET_PARAM 0x04
+#define DRM_PANFROST_GET_BO_OFFSET 0x05
+
+#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
+#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
+#define DRM_IOCTL_PANFROST_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_BO, struct drm_panfrost_create_bo)
+#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
+#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
+#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+
+#define PANFROST_JD_REQ_FS (1 << 0)
+/**
+ * struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * This asks the kernel to have the GPU execute a render command list.
+ */
+struct drm_panfrost_submit {
+
+ /** Address to GPU mapping of job descriptor */
+ __u64 jc;
+
+ /** An optional array of sync objects to wait on before starting this job. */
+ __u64 in_syncs;
+
+ /** Number of sync objects to wait on before starting this job. */
+ __u32 in_sync_count;
+
+ /** An optional sync object to place the completion fence in. */
+ __u32 out_sync;
+
+ /** Pointer to a u32 array of the BOs that are referenced by the job. */
+ __u64 bo_handles;
+
+ /** Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ /** A combination of PANFROST_JD_REQ_* */
+ __u32 requirements;
+};
+
+/**
+ * struct drm_panfrost_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_PANFROST_SUBMIT on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_panfrost_wait_bo {
+ __u32 handle;
+ __u32 pad;
+ __s64 timeout_ns; /* absolute */
+};
+
+/**
+ * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_panfrost_create_bo {
+ __u32 size;
+ __u32 flags;
+ /** Returned GEM handle for the BO. */
+ __u32 handle;
+ /* Pad, must be zero-filled. */
+ __u32 pad;
+ /**
+ * Returned offset for the BO in the GPU address space. This offset
+ * is private to the DRM fd and is valid for the lifetime of the GEM
+ * handle.
+ *
+ * This offset value will always be nonzero, since various HW
+ * units treat 0 specially.
+ */
+ __u64 offset;
+};
+
+/**
+ * struct drm_panfrost_mmap_bo - ioctl argument for mapping Panfrost BOs.
+ *
+ * This doesn't actually perform an mmap. Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node. This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_panfrost_mmap_bo {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 flags;
+ /** offset into the drm node to use for subsequent mmap call. */
+ __u64 offset;
+};
+
+enum drm_panfrost_param {
+ DRM_PANFROST_PARAM_GPU_PROD_ID,
+};
+
+struct drm_panfrost_get_param {
+ __u32 param;
+ __u32 pad;
+ __u64 value;
+};
+
+/**
+ * Returns the offset for the BO in the GPU address space for this DRM fd.
+ * This is the same value returned by drm_panfrost_create_bo, if that was called
+ * from this DRM fd.
+ */
+struct drm_panfrost_get_bo_offset {
+ __u32 handle;
+ __u32 pad;
+ __u64 offset;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _PANFROST_DRM_H_ */
diff --git a/lib/Makefile.sources b/lib/Makefile.sources
index 97685823..e16de86e 100644
--- a/lib/Makefile.sources
+++ b/lib/Makefile.sources
@@ -5,6 +5,8 @@ lib_source_list = \
i830_reg.h \
i915/gem_context.c \
i915/gem_context.h \
+ i915/gem_engine_topology.c \
+ i915/gem_engine_topology.h \
i915/gem_scheduler.c \
i915/gem_scheduler.h \
i915/gem_submission.c \
@@ -13,6 +15,8 @@ lib_source_list = \
i915/gem_ring.c \
i915/gem_mman.c \
i915/gem_mman.h \
+ i915/gem_vm.c \
+ i915/gem_vm.h \
i915_3d.h \
i915_reg.h \
i915_pciids.h \
@@ -27,6 +31,8 @@ lib_source_list = \
igt_color_encoding.h \
igt_edid.c \
igt_edid.h \
+ igt_eld.c \
+ igt_eld.h \
igt_gpu_power.c \
igt_gpu_power.h \
igt_gt.c \
diff --git a/lib/drmtest.c b/lib/drmtest.c
index 4a92fb5c..25f20353 100644
--- a/lib/drmtest.c
+++ b/lib/drmtest.c
@@ -187,6 +187,12 @@ static int modprobe(const char *driver)
return igt_kmod_load(driver, "");
}
+/* virtio's driver name is virtio_gpu but the module is virtio-gpu.ko */
+static void modprobe_virtio(const char *name)
+{
+ igt_kmod_load("virtio-gpu", "");
+}
+
static void modprobe_i915(const char *name)
{
/* When loading i915, we also want to load snd-hda et al */
@@ -200,11 +206,11 @@ static const struct module {
} modules[] = {
{ DRIVER_AMDGPU, "amdgpu" },
{ DRIVER_INTEL, "i915", modprobe_i915 },
+ { DRIVER_PANFROST, "panfrost" },
{ DRIVER_V3D, "v3d" },
{ DRIVER_VC4, "vc4" },
{ DRIVER_VGEM, "vgem" },
- { DRIVER_VIRTIO, "virtio-gpu" },
- { DRIVER_VIRTIO, "virtio_gpu" },
+ { DRIVER_VIRTIO, "virtio_gpu", modprobe_virtio },
{}
};
@@ -361,6 +367,8 @@ static const char *chipset_to_str(int chipset)
return "virtio";
case DRIVER_AMDGPU:
return "amdgpu";
+ case DRIVER_PANFROST:
+ return "panfrost";
case DRIVER_ANY:
return "any";
default:
diff --git a/lib/drmtest.h b/lib/drmtest.h
index 71d197f3..6c4c3899 100644
--- a/lib/drmtest.h
+++ b/lib/drmtest.h
@@ -44,6 +44,7 @@
#define DRIVER_VIRTIO (1 << 3)
#define DRIVER_AMDGPU (1 << 4)
#define DRIVER_V3D (1 << 5)
+#define DRIVER_PANFROST (1 << 6)
/*
 * Exclude DRIVER_VGEM from DRIVER_ANY since if you run on a system
* with vgem as well as a supported driver, you can end up with a
diff --git a/lib/i915/gem_context.c b/lib/i915/gem_context.c
index f94d89cb..83c5df96 100644
--- a/lib/i915/gem_context.c
+++ b/lib/i915/gem_context.c
@@ -272,6 +272,76 @@ void gem_context_set_priority(int fd, uint32_t ctx_id, int prio)
igt_assert_eq(__gem_context_set_priority(fd, ctx_id, prio), 0);
}
+int
+__gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags,
+ uint32_t *out)
+{
+ struct drm_i915_gem_context_create_ext_clone clone = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .clone_id = src,
+ .flags = share,
+ };
+ struct drm_i915_gem_context_create_ext arg = {
+ .flags = flags | I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&clone),
+ };
+ int err = 0;
+
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg)) {
+ err = -errno;
+ igt_assume(err);
+ }
+
+ *out = arg.ctx_id;
+
+ errno = 0;
+ return err;
+}
+
+static bool __gem_context_has(int i915, uint32_t share, unsigned int flags)
+{
+ uint32_t ctx;
+
+ __gem_context_clone(i915, 0, share, flags, &ctx);
+ if (ctx)
+ gem_context_destroy(i915, ctx);
+
+ errno = 0;
+ return ctx;
+}
+
+bool gem_contexts_has_shared_gtt(int i915)
+{
+ return __gem_context_has(i915, I915_CONTEXT_CLONE_VM, 0);
+}
+
+bool gem_has_queues(int i915)
+{
+ return __gem_context_has(i915,
+ I915_CONTEXT_CLONE_VM,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+}
+
+uint32_t gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags)
+{
+ uint32_t ctx;
+
+ igt_assert_eq(__gem_context_clone(i915, src, share, flags, &ctx), 0);
+
+ return ctx;
+}
+
+uint32_t gem_queue_create(int i915)
+{
+ return gem_context_clone(i915, 0,
+ I915_CONTEXT_CLONE_VM,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+}
+
bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
{
struct drm_i915_gem_exec_object2 exec = {};
@@ -290,7 +360,7 @@ bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
* wouldn't produce any result.
*/
if ((engine & ~(3<<13)) == I915_EXEC_BSD) {
- if (engine & (3 << 13) && !gem_has_bsd2(fd))
+ if (engine & (2 << 13) && !gem_has_bsd2(fd))
return false;
}
diff --git a/lib/i915/gem_context.h b/lib/i915/gem_context.h
index a052714d..8043c340 100644
--- a/lib/i915/gem_context.h
+++ b/lib/i915/gem_context.h
@@ -29,6 +29,19 @@ int __gem_context_create(int fd, uint32_t *ctx_id);
void gem_context_destroy(int fd, uint32_t ctx_id);
int __gem_context_destroy(int fd, uint32_t ctx_id);
+int __gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags,
+ uint32_t *out);
+uint32_t gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags);
+
+uint32_t gem_queue_create(int i915);
+
+bool gem_contexts_has_shared_gtt(int i915);
+bool gem_has_queues(int i915);
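+
+/*
+ * Minimal usage sketch (assuming 'i915' is an open i915 device fd): a "queue"
+ * is a context sharing the default context's VM and using a single timeline
+ * across engines.
+ *
+ *	uint32_t queue = gem_queue_create(i915);
+ *
+ *	... submit work against 'queue' ...
+ *
+ *	gem_context_destroy(i915, queue);
+ */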
+
bool gem_has_contexts(int fd);
void gem_require_contexts(int fd);
void gem_context_require_bannable(int fd);
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
new file mode 100644
index 00000000..fdd1b951
--- /dev/null
+++ b/lib/i915/gem_engine_topology.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "drmtest.h"
+#include "ioctl_wrappers.h"
+
+#include "i915/gem_engine_topology.h"
+
+/*
+ * Limit what we support for simplicity, due to a limitation in how much we
+ * can address via execbuf2.
+ */
+#define SIZEOF_CTX_PARAM offsetof(struct i915_context_param_engines, \
+ engines[GEM_MAX_ENGINES])
+#define SIZEOF_QUERY offsetof(struct drm_i915_query_engine_info, \
+ engines[GEM_MAX_ENGINES])
+
+#define DEFINE_CONTEXT_ENGINES_PARAM(e__, p__, c__, N__) \
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(e__, N__); \
+ struct drm_i915_gem_context_param p__ = { \
+ .param = I915_CONTEXT_PARAM_ENGINES, \
+ .ctx_id = c__, \
+ .size = SIZEOF_CTX_PARAM, \
+ .value = to_user_pointer(&e__), \
+ }
+
+static int __gem_query(int fd, struct drm_i915_query *q)
+{
+ int err = 0;
+
+ if (igt_ioctl(fd, DRM_IOCTL_I915_QUERY, q))
+ err = -errno;
+
+ errno = 0;
+ return err;
+}
+
+static void gem_query(int fd, struct drm_i915_query *q)
+{
+ igt_assert_eq(__gem_query(fd, q), 0);
+}
+
+static void query_engines(int fd,
+ struct drm_i915_query_engine_info *query_engines,
+ int length)
+{
+ struct drm_i915_query_item item = { };
+ struct drm_i915_query query = { };
+
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ query.items_ptr = to_user_pointer(&item);
+ query.num_items = 1;
+ item.length = length;
+
+ item.data_ptr = to_user_pointer(query_engines);
+
+ gem_query(fd, &query);
+}
+
+static void ctx_map_engines(int fd, struct intel_engine_data *ed,
+ struct drm_i915_gem_context_param *param)
+{
+ struct i915_context_param_engines *engines =
+ from_user_pointer(param->value);
+ int i = 0;
+
+ for (typeof(engines->engines[0]) *p =
+ &engines->engines[0];
+ i < ed->nengines; i++, p++) {
+ p->engine_class = ed->engines[i].class;
+ p->engine_instance = ed->engines[i].instance;
+ }
+
+ param->size = offsetof(typeof(*engines), engines[i]);
+ engines->extensions = 0;
+
+ gem_context_set_param(fd, param);
+}
+
+static void init_engine(struct intel_execution_engine2 *e2,
+ int class, int instance, uint64_t flags)
+{
+ const struct intel_execution_engine2 *__e2;
+ static const char *unknown_name = "unknown",
+ *virtual_name = "virtual";
+
+ e2->class = class;
+ e2->instance = instance;
+ e2->flags = flags;
+
+ /* engine is a virtual engine */
+ if (class == I915_ENGINE_CLASS_INVALID &&
+ instance == I915_ENGINE_CLASS_INVALID_VIRTUAL) {
+ e2->name = virtual_name;
+ e2->is_virtual = true;
+ return;
+ }
+
+ __for_each_static_engine(__e2)
+ if (__e2->class == class && __e2->instance == instance)
+ break;
+
+ if (__e2->name) {
+ e2->name = __e2->name;
+ } else {
+ igt_warn("found unknown engine (%d, %d)\n", class, instance);
+ e2->name = unknown_name;
+ e2->flags = -1;
+ }
+
+ /* explicitly mark it as not virtual */
+ e2->is_virtual = false;
+}
+
+static void query_engine_list(int fd, struct intel_engine_data *ed)
+{
+ uint8_t buff[SIZEOF_QUERY] = { };
+ struct drm_i915_query_engine_info *query_engine =
+ (struct drm_i915_query_engine_info *) buff;
+ int i;
+
+ query_engines(fd, query_engine, SIZEOF_QUERY);
+
+ for (i = 0; i < query_engine->num_engines; i++)
+ init_engine(&ed->engines[i],
+ query_engine->engines[i].engine.engine_class,
+ query_engine->engines[i].engine.engine_instance, i);
+
+ ed->nengines = query_engine->num_engines;
+}
+
+struct intel_execution_engine2 *
+intel_get_current_engine(struct intel_engine_data *ed)
+{
+ if (!ed->n)
+ ed->current_engine = &ed->engines[0];
+ else if (ed->n >= ed->nengines)
+ ed->current_engine = NULL;
+
+ return ed->current_engine;
+}
+
+void intel_next_engine(struct intel_engine_data *ed)
+{
+ if (ed->n + 1 < ed->nengines) {
+ ed->n++;
+ ed->current_engine = &ed->engines[ed->n];
+ } else {
+ ed->n = ed->nengines;
+ ed->current_engine = NULL;
+ }
+}
+
+struct intel_execution_engine2 *
+intel_get_current_physical_engine(struct intel_engine_data *ed)
+{
+ struct intel_execution_engine2 *e;
+
+ for (e = intel_get_current_engine(ed);
+ e && e->is_virtual;
+ intel_next_engine(ed))
+ ;
+
+ return e;
+}
+
+static int gem_topology_get_param(int fd,
+ struct drm_i915_gem_context_param *p)
+{
+ if (igt_only_list_subtests())
+ return -ENODEV;
+
+ if (__gem_context_get_param(fd, p))
+ return -1; /* using default engine map */
+
+ if (!p->size)
+ return 0;
+
+ /* size will store the engine count */
+ p->size = (p->size - sizeof(struct i915_context_param_engines)) /
+ (offsetof(struct i915_context_param_engines,
+ engines[1]) -
+ sizeof(struct i915_context_param_engines));
+
+ igt_assert_f(p->size <= GEM_MAX_ENGINES, "unsupported engine count\n");
+
+ return 0;
+}
+
+struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id)
+{
+ DEFINE_CONTEXT_ENGINES_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
+ struct intel_engine_data engine_data = { };
+ int i;
+
+ if (gem_topology_get_param(fd, &param)) {
+ /* if kernel does not support engine/context mapping */
+ const struct intel_execution_engine2 *e2;
+
+ igt_debug("using pre-allocated engine list\n");
+
+ __for_each_static_engine(e2) {
+ struct intel_execution_engine2 *__e2 =
+ &engine_data.engines[engine_data.nengines];
+
+ __e2->name = e2->name;
+ __e2->instance = e2->instance;
+ __e2->class = e2->class;
+ __e2->flags = e2->flags;
+ __e2->is_virtual = false;
+
+ if (igt_only_list_subtests() ||
+ gem_has_ring(fd, e2->flags))
+ engine_data.nengines++;
+ }
+ return engine_data;
+ }
+
+ if (!param.size) {
+ query_engine_list(fd, &engine_data);
+ ctx_map_engines(fd, &engine_data, &param);
+ } else {
+ /* param.size contains the engine count */
+ for (i = 0; i < param.size; i++)
+ init_engine(&engine_data.engines[i],
+ engines.engines[i].engine_class,
+ engines.engines[i].engine_instance,
+ i);
+
+ engine_data.nengines = i;
+ }
+
+ return engine_data;
+}
+
+int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
+ struct intel_execution_engine2 *e)
+{
+ DEFINE_CONTEXT_ENGINES_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
+
+ /* a bit paranoid */
+ igt_assert(e);
+
+ if (gem_topology_get_param(fd, &param) || !param.size)
+ return -EINVAL;
+
+ e->class = engines.engines[engine].engine_class;
+ e->instance = engines.engines[engine].engine_instance;
+
+ return 0;
+}
+
+void gem_context_set_all_engines(int fd, uint32_t ctx)
+{
+ DEFINE_CONTEXT_ENGINES_PARAM(engines, param, ctx, GEM_MAX_ENGINES);
+ struct intel_engine_data engine_data = { };
+
+ if (!gem_topology_get_param(fd, &param) && !param.size) {
+ query_engine_list(fd, &engine_data);
+ ctx_map_engines(fd, &engine_data, &param);
+ }
+}
+
+bool gem_has_engine_topology(int fd)
+{
+ struct drm_i915_gem_context_param param = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ };
+
+ return !__gem_context_get_param(fd, &param);
+}
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
new file mode 100644
index 00000000..2415fd1e
--- /dev/null
+++ b/lib/i915/gem_engine_topology.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef GEM_ENGINE_TOPOLOGY_H
+#define GEM_ENGINE_TOPOLOGY_H
+
+#include "igt_gt.h"
+#include "i915_drm.h"
+
+#define GEM_MAX_ENGINES (I915_EXEC_RING_MASK + 1)
+
+struct intel_engine_data {
+ uint32_t nengines;
+ uint32_t n;
+ struct intel_execution_engine2 *current_engine;
+ struct intel_execution_engine2 engines[GEM_MAX_ENGINES];
+};
+
+bool gem_has_engine_topology(int fd);
+struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id);
+
+/* iteration functions */
+struct intel_execution_engine2 *
+intel_get_current_engine(struct intel_engine_data *ed);
+
+struct intel_execution_engine2 *
+intel_get_current_physical_engine(struct intel_engine_data *ed);
+
+void intel_next_engine(struct intel_engine_data *ed);
+
+int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
+ struct intel_execution_engine2 *e);
+
+void gem_context_set_all_engines(int fd, uint32_t ctx);
+
+#define __for_each_static_engine(e__) \
+ for ((e__) = intel_execution_engines2; (e__)->name; (e__)++)
+
+#define for_each_context_engine(fd__, ctx__, e__) \
+ for (struct intel_engine_data i__ = intel_init_engine_list(fd__, ctx__); \
+ ((e__) = intel_get_current_engine(&i__)); \
+ intel_next_engine(&i__))
+
+/* needs to replace "for_each_physical_engine" when conflicts are fixed */
+#define __for_each_physical_engine(fd__, e__) \
+ for (struct intel_engine_data i__ = intel_init_engine_list(fd__, 0); \
+ ((e__) = intel_get_current_physical_engine(&i__)); \
+ intel_next_engine(&i__))
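+
+/*
+ * Minimal usage sketch (assuming 'fd' is an open i915 device fd and 'ctx' an
+ * existing context id):
+ *
+ *	struct intel_execution_engine2 *e;
+ *
+ *	for_each_context_engine(fd, ctx, e)
+ *		igt_info("%s: class=%d instance=%d\n",
+ *			 e->name, e->class, e->instance);
+ */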
+
+#endif /* GEM_ENGINE_TOPOLOGY_H */
diff --git a/lib/i915/gem_vm.c b/lib/i915/gem_vm.c
new file mode 100644
index 00000000..9a022a56
--- /dev/null
+++ b/lib/i915/gem_vm.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <errno.h>
+#include <string.h>
+
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+
+#include "i915/gem_vm.h"
+
+/**
+ * SECTION:gem_vm
+ * @short_description: Helpers for dealing with address spaces (vm/GTT)
+ * @title: GEM Virtual Memory
+ *
+ * This helper library contains functions used for handling gem address
+ * spaces.
+ */
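+
+/*
+ * Minimal usage sketch (assuming 'ctx_a' and 'ctx_b' are existing context ids
+ * on the same i915 fd): share one address space between two contexts.
+ *
+ *	uint32_t vm = gem_vm_create(i915);
+ *	struct drm_i915_gem_context_param arg = {
+ *		.param = I915_CONTEXT_PARAM_VM,
+ *		.value = vm,
+ *	};
+ *
+ *	arg.ctx_id = ctx_a;
+ *	gem_context_set_param(i915, &arg);
+ *	arg.ctx_id = ctx_b;
+ *	gem_context_set_param(i915, &arg);
+ *	...
+ *	gem_vm_destroy(i915, vm);
+ */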
+
+/**
+ * gem_has_vm:
+ * @i915: open i915 drm file descriptor
+ *
+ * Returns: whether VM creation is supported or not.
+ */
+bool gem_has_vm(int i915)
+{
+ uint32_t vm_id = 0;
+
+ __gem_vm_create(i915, &vm_id);
+ if (vm_id)
+ gem_vm_destroy(i915, vm_id);
+
+ return vm_id;
+}
+
+/**
+ * gem_require_vm:
+ * @i915: open i915 drm file descriptor
+ *
+ * This helper will automatically skip the test on platforms where address
+ * space creation is not available.
+ */
+void gem_require_vm(int i915)
+{
+ igt_require(gem_has_vm(i915));
+}
+
+int __gem_vm_create(int i915, uint32_t *vm_id)
+{
+ struct drm_i915_gem_vm_control ctl = {};
+ int err = 0;
+
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl) == 0) {
+ *vm_id = ctl.vm_id;
+ } else {
+ err = -errno;
+ igt_assume(err != 0);
+ }
+
+ errno = 0;
+ return err;
+}
+
+/**
+ * gem_vm_create:
+ * @i915: open i915 drm file descriptor
+ *
+ * This wraps the VM_CREATE ioctl, which is used to allocate a new
+ * address space for use with GEM contexts.
+ *
+ * Returns: The id of the allocated address space.
+ */
+uint32_t gem_vm_create(int i915)
+{
+ uint32_t vm_id;
+
+ igt_assert_eq(__gem_vm_create(i915, &vm_id), 0);
+ igt_assert(vm_id != 0);
+
+ return vm_id;
+}
+
+int __gem_vm_destroy(int i915, uint32_t vm_id)
+{
+ struct drm_i915_gem_vm_control ctl = { .vm_id = vm_id };
+ int err = 0;
+
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl)) {
+ err = -errno;
+ igt_assume(err);
+ }
+
+ errno = 0;
+ return err;
+}
+
+/**
+ * gem_vm_destroy:
+ * @i915: open i915 drm file descriptor
+ * @vm_id: i915 VM id
+ *
+ * This wraps the VM_DESTROY ioctl, which is used to free an address space
+ * handle.
+ */
+void gem_vm_destroy(int i915, uint32_t vm_id)
+{
+ igt_assert_eq(__gem_vm_destroy(i915, vm_id), 0);
+}
diff --git a/lib/i915/gem_vm.h b/lib/i915/gem_vm.h
new file mode 100644
index 00000000..27af899d
--- /dev/null
+++ b/lib/i915/gem_vm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef GEM_VM_H
+#define GEM_VM_H
+
+#include <stdint.h>
+
+bool gem_has_vm(int i915);
+void gem_require_vm(int i915);
+
+uint32_t gem_vm_create(int i915);
+int __gem_vm_create(int i915, uint32_t *vm_id);
+
+void gem_vm_destroy(int i915, uint32_t vm_id);
+int __gem_vm_destroy(int i915, uint32_t vm_id);
+
+#endif /* GEM_VM_H */
diff --git a/lib/igt.h b/lib/igt.h
index 5852d557..a6c4e44d 100644
--- a/lib/igt.h
+++ b/lib/igt.h
@@ -35,13 +35,13 @@
#include "igt_dummyload.h"
#include "igt_fb.h"
#include "igt_frame.h"
-#include "igt_alsa.h"
-#include "igt_audio.h"
#include "igt_gt.h"
#include "igt_kms.h"
#include "igt_pm.h"
#include "igt_stats.h"
#ifdef HAVE_CHAMELIUM
+#include "igt_alsa.h"
+#include "igt_audio.h"
#include "igt_chamelium.h"
#include "igt_chamelium_stream.h"
#endif
@@ -54,5 +54,6 @@
#include "media_spin.h"
#include "rendercopy.h"
#include "i915/gem_mman.h"
+#include "i915/gem_engine_topology.h"
#endif /* IGT_H */
diff --git a/lib/igt_alsa.c b/lib/igt_alsa.c
index a478686a..5b5980a9 100644
--- a/lib/igt_alsa.c
+++ b/lib/igt_alsa.c
@@ -27,7 +27,6 @@
#include "config.h"
#include <limits.h>
-#include <alsa/asoundlib.h>
#include "igt_alsa.h"
#include "igt_aux.h"
@@ -47,20 +46,13 @@
struct alsa {
snd_pcm_t *output_handles[HANDLES_MAX];
int output_handles_count;
+ snd_pcm_format_t output_format;
int output_sampling_rate;
int output_channels;
- int (*output_callback)(void *data, short *buffer, int samples);
+ int (*output_callback)(void *data, void *buffer, int samples);
void *output_callback_data;
int output_samples_trigger;
-
- snd_pcm_t *input_handle;
- int input_sampling_rate;
- int input_channels;
-
- int (*input_callback)(void *data, short *buffer, int samples);
- void *input_callback_data;
- int input_samples_trigger;
};
/**
@@ -250,40 +242,6 @@ int alsa_open_output(struct alsa *alsa, const char *device_name)
}
/**
- * alsa_open_input:
- * @alsa: The target alsa structure
- * @device_name: The name of the input device to open
- *
- * Open the ALSA input device whose name matches the provided name prefix.
- *
- * Returns: An integer equal to zero for success and negative for failure
- */
-int alsa_open_input(struct alsa *alsa, const char *device_name)
-{
- snd_pcm_t *handle;
- char *identifier;
- int ret;
-
- identifier = alsa_resolve_indentifier(device_name, 0);
-
- ret = snd_pcm_open(&handle, device_name, SND_PCM_STREAM_CAPTURE,
- SND_PCM_NONBLOCK);
- if (ret < 0)
- goto complete;
-
- igt_debug("Opened input %s\n", identifier);
-
- alsa->input_handle = handle;
-
- ret = 0;
-
-complete:
- free(identifier);
-
- return ret;
-}
-
-/**
* alsa_close_output:
* @alsa: The target alsa structure
*
@@ -308,26 +266,8 @@ void alsa_close_output(struct alsa *alsa)
alsa->output_callback = NULL;
}
-/**
- * alsa_close_output:
- * @alsa: The target alsa structure
- *
- * Close the open ALSA input.
- */
-void alsa_close_input(struct alsa *alsa)
-{
- snd_pcm_t *handle = alsa->input_handle;
- if (!handle)
- return;
-
- snd_pcm_close(handle);
- alsa->input_handle = NULL;
-
- alsa->input_callback = NULL;
-}
-
-static bool alsa_test_configuration(snd_pcm_t *handle, int channels,
- int sampling_rate)
+static bool alsa_test_configuration(snd_pcm_t *handle, snd_pcm_format_t fmt,
+ int channels, int sampling_rate)
{
snd_pcm_hw_params_t *params;
int ret;
@@ -341,6 +281,13 @@ static bool alsa_test_configuration(snd_pcm_t *handle, int channels,
if (ret < 0)
return false;
+ ret = snd_pcm_hw_params_test_format(handle, params, fmt);
+ if (ret < 0) {
+ igt_debug("Output device doesn't support the format %s\n",
+ snd_pcm_format_name(fmt));
+ return false;
+ }
+
ret = snd_pcm_hw_params_test_rate(handle, params, sampling_rate, 0);
if (ret < 0) {
snd_pcm_hw_params_get_rate_min(params, &min_rate, &min_rate_dir);
@@ -367,6 +314,7 @@ static bool alsa_test_configuration(snd_pcm_t *handle, int channels,
/**
* alsa_test_output_configuration:
* @alsa: The target alsa structure
+ * @fmt: The format to test
* @channels: The number of channels to test
* @sampling_rate: The sampling rate to test
*
@@ -375,8 +323,8 @@ static bool alsa_test_configuration(snd_pcm_t *handle, int channels,
*
* Returns: A boolean indicating whether the test succeeded
*/
-bool alsa_test_output_configuration(struct alsa *alsa, int channels,
- int sampling_rate)
+bool alsa_test_output_configuration(struct alsa *alsa, snd_pcm_format_t fmt,
+ int channels, int sampling_rate)
{
snd_pcm_t *handle;
bool ret;
@@ -385,7 +333,7 @@ bool alsa_test_output_configuration(struct alsa *alsa, int channels,
for (i = 0; i < alsa->output_handles_count; i++) {
handle = alsa->output_handles[i];
- ret = alsa_test_configuration(handle, channels, sampling_rate);
+ ret = alsa_test_configuration(handle, fmt, channels, sampling_rate);
if (!ret)
return false;
}
@@ -394,24 +342,6 @@ bool alsa_test_output_configuration(struct alsa *alsa, int channels,
}
/**
- * alsa_test_input_configuration:
- * @alsa: The target alsa structure
- * @channels: The number of channels to test
- * @sampling_rate: The sampling rate to test
- *
- * Test the input configuration specified by @channels and @sampling_rate
- * for the input device.
- *
- * Returns: A boolean indicating whether the test succeeded
- */
-bool alsa_test_input_configuration(struct alsa *alsa, int channels,
- int sampling_rate)
-{
- return alsa_test_configuration(alsa->input_handle, channels,
- sampling_rate);
-}
-
-/**
* alsa_configure_output:
* @alsa: The target alsa structure
* @channels: The number of channels to test
@@ -420,8 +350,8 @@ bool alsa_test_input_configuration(struct alsa *alsa, int channels,
* Configure the output devices with the configuration specified by @channels
* and @sampling_rate.
*/
-void alsa_configure_output(struct alsa *alsa, int channels,
- int sampling_rate)
+void alsa_configure_output(struct alsa *alsa, snd_pcm_format_t fmt,
+ int channels, int sampling_rate)
{
snd_pcm_t *handle;
int ret;
@@ -432,45 +362,19 @@ void alsa_configure_output(struct alsa *alsa, int channels,
for (i = 0; i < alsa->output_handles_count; i++) {
handle = alsa->output_handles[i];
- ret = snd_pcm_set_params(handle, SND_PCM_FORMAT_S16_LE,
+ ret = snd_pcm_set_params(handle, fmt,
SND_PCM_ACCESS_RW_INTERLEAVED,
channels, sampling_rate,
soft_resample, latency);
igt_assert(ret >= 0);
}
+ alsa->output_format = fmt;
alsa->output_channels = channels;
alsa->output_sampling_rate = sampling_rate;
}
/**
- * alsa_configure_input:
- * @alsa: The target alsa structure
- * @channels: The number of channels to test
- * @sampling_rate: The sampling rate to test
- *
- * Configure the input device with the configuration specified by @channels
- * and @sampling_rate.
- */
-void alsa_configure_input(struct alsa *alsa, int channels,
- int sampling_rate)
-{
- snd_pcm_t *handle;
- int ret;
-
- handle = alsa->input_handle;
-
- ret = snd_pcm_set_params(handle, SND_PCM_FORMAT_S16_LE,
- SND_PCM_ACCESS_RW_INTERLEAVED, channels,
- sampling_rate, 0, 0);
- igt_assert(ret >= 0);
-
- alsa->input_channels = channels;
- alsa->input_sampling_rate = sampling_rate;
-
-}
-
-/**
* alsa_register_output_callback:
* @alsa: The target alsa structure
* @callback: The callback function to call to fill output data
@@ -484,7 +388,7 @@ void alsa_configure_input(struct alsa *alsa, int channels,
* for failure.
*/
void alsa_register_output_callback(struct alsa *alsa,
- int (*callback)(void *data, short *buffer, int samples),
+ int (*callback)(void *data, void *buffer, int samples),
void *callback_data, int samples_trigger)
{
alsa->output_callback = callback;
@@ -493,28 +397,6 @@ void alsa_register_output_callback(struct alsa *alsa,
}
/**
- * alsa_register_input_callback:
- * @alsa: The target alsa structure
- * @callback: The callback function to call when input data is available
- * @callback_data: The data pointer to pass to the callback function
- * @samples_trigger: The required number of samples to trigger the callback
- *
- * Register a callback function to be called when input data is available during
- * a run. The callback is called when @samples_trigger samples are available.
- *
- * The callback should return an integer equal to zero for success, negative for
- * failure and positive to indicate that the run should stop.
- */
-void alsa_register_input_callback(struct alsa *alsa,
- int (*callback)(void *data, short *buffer, int samples),
- void *callback_data, int samples_trigger)
-{
- alsa->input_callback = callback;
- alsa->input_callback_data = callback_data;
- alsa->input_samples_trigger = samples_trigger;
-}
-
-/**
* alsa_run:
* @alsa: The target alsa structure
* @duration_ms: The maximum duration of the run in milliseconds, or -1 for an
@@ -529,19 +411,14 @@ void alsa_register_input_callback(struct alsa *alsa,
int alsa_run(struct alsa *alsa, int duration_ms)
{
snd_pcm_t *handle;
- short *output_buffer = NULL;
- short *input_buffer = NULL;
+ char *output_buffer = NULL;
int output_limit;
int output_total = 0;
int output_counts[alsa->output_handles_count];
bool output_ready = false;
int output_channels;
+ int bytes_per_sample;
int output_trigger;
- int input_limit;
- int input_total = 0;
- int input_count = 0;
- int input_channels;
- int input_trigger;
bool reached;
int index;
int count;
@@ -551,17 +428,10 @@ int alsa_run(struct alsa *alsa, int duration_ms)
output_limit = alsa->output_sampling_rate * duration_ms / 1000;
output_channels = alsa->output_channels;
+ bytes_per_sample = snd_pcm_format_physical_width(alsa->output_format) / 8;
output_trigger = alsa->output_samples_trigger;
- output_buffer = malloc(sizeof(short) * output_channels *
- output_trigger);
-
- if (alsa->input_callback) {
- input_limit = alsa->input_sampling_rate * duration_ms / 1000;
- input_trigger = alsa->input_samples_trigger;
- input_channels = alsa->input_channels;
- input_buffer = malloc(sizeof(short) * input_channels *
- input_trigger);
- }
+ output_buffer = malloc(output_channels * output_trigger *
+ bytes_per_sample);
do {
reached = true;
@@ -595,7 +465,7 @@ int alsa_run(struct alsa *alsa, int duration_ms)
count = avail < count ? avail : count;
ret = snd_pcm_writei(handle,
- &output_buffer[index],
+ &output_buffer[index * bytes_per_sample],
count);
if (ret < 0) {
ret = snd_pcm_recover(handle,
@@ -627,63 +497,12 @@ int alsa_run(struct alsa *alsa, int duration_ms)
output_total += output_trigger;
}
-
- if (alsa->input_callback &&
- (input_limit < 0 || input_total < input_limit)) {
- reached = false;
-
- if (input_count == input_trigger) {
- input_count = 0;
-
- ret = alsa->input_callback(alsa->input_callback_data,
- input_buffer,
- input_trigger);
- if (ret != 0)
- goto complete;
- }
-
- handle = alsa->input_handle;
-
- ret = snd_pcm_avail(handle);
- if (input_count < input_trigger &&
- (ret > 0 || input_total == 0)) {
- index = input_count * input_channels;
- count = input_trigger - input_count;
- avail = snd_pcm_avail(handle);
-
- count = avail > 0 && avail < count ? avail :
- count;
-
- ret = snd_pcm_readi(handle,
- &input_buffer[index],
- count);
- if (ret == -EAGAIN) {
- ret = 0;
- } else if (ret < 0) {
- ret = snd_pcm_recover(handle, ret, 0);
- if (ret < 0) {
- igt_debug("snd_pcm_recover after snd_pcm_readi failed");
- goto complete;
- }
- }
-
- input_count += ret;
- input_total += ret;
- } else if (input_count < input_trigger && ret < 0) {
- ret = snd_pcm_recover(handle, ret, 0);
- if (ret < 0) {
- igt_debug("snd_pcm_recover failed");
- goto complete;
- }
- }
- }
} while (!reached);
ret = 0;
complete:
free(output_buffer);
- free(input_buffer);
return ret;
}
diff --git a/lib/igt_alsa.h b/lib/igt_alsa.h
index 5c804b46..1ece9f52 100644
--- a/lib/igt_alsa.h
+++ b/lib/igt_alsa.h
@@ -29,6 +29,7 @@
#include "config.h"
+#include <alsa/asoundlib.h>
#include <stdbool.h>
struct alsa;
@@ -36,23 +37,14 @@ struct alsa;
bool alsa_has_exclusive_access(void);
struct alsa *alsa_init(void);
int alsa_open_output(struct alsa *alsa, const char *device_name);
-int alsa_open_input(struct alsa *alsa, const char *device_name);
void alsa_close_output(struct alsa *alsa);
-void alsa_close_input(struct alsa *alsa);
-bool alsa_test_output_configuration(struct alsa *alsa, int channels,
- int sampling_rate);
-bool alsa_test_input_configuration(struct alsa *alsa, int channels,
- int sampling_rate);
-void alsa_configure_output(struct alsa *alsa, int channels,
- int sampling_rate);
-void alsa_configure_input(struct alsa *alsa, int channels,
- int sampling_rate);
+bool alsa_test_output_configuration(struct alsa *alsa, snd_pcm_format_t fmt,
+ int channels, int sampling_rate);
+void alsa_configure_output(struct alsa *alsa, snd_pcm_format_t fmt,
+ int channels, int sampling_rate);
void alsa_register_output_callback(struct alsa *alsa,
- int (*callback)(void *data, short *buffer, int samples),
+ int (*callback)(void *data, void *buffer, int samples),
void *callback_data, int samples_trigger);
-void alsa_register_input_callback(struct alsa *alsa,
- int (*callback)(void *data, short *buffer, int samples),
- void *callback_data, int samples_trigger);
int alsa_run(struct alsa *alsa, int duration_ms);
#endif
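
Below is a minimal sketch (not part of the patch) of how the format-aware output API might be driven; the device name, rate, trigger size and run duration are illustrative, the usual IGT includes are assumed, and the callback now receives a void * buffer whose element size follows the configured format:

static int fill_silence(void *data, void *buffer, int samples)
{
	/* 2 channels of S32_LE assumed, matching the configuration below */
	memset(buffer, 0, samples * 2 * sizeof(int32_t));
	return 0; /* keep running until the duration elapses */
}

static void example_output(void)
{
	struct alsa *alsa = alsa_init();

	igt_assert_eq(alsa_open_output(alsa, "HDMI"), 0);
	igt_require(alsa_test_output_configuration(alsa, SND_PCM_FORMAT_S32_LE,
						    2, 48000));
	alsa_configure_output(alsa, SND_PCM_FORMAT_S32_LE, 2, 48000);
	alsa_register_output_callback(alsa, fill_silence, NULL, 1024);
	igt_assert_eq(alsa_run(alsa, 1000), 0);
	alsa_close_output(alsa);
}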
diff --git a/lib/igt_audio.c b/lib/igt_audio.c
index fd8cf07c..e0b1bafe 100644
--- a/lib/igt_audio.c
+++ b/lib/igt_audio.c
@@ -36,6 +36,20 @@
#include "igt_core.h"
#define FREQS_MAX 64
+#define CHANNELS_MAX 8
+#define SYNTHESIZE_AMPLITUDE 0.9
+#define SYNTHESIZE_ACCURACY 0.2
+/** MIN_FREQ: minimum frequency that audio_signal can generate.
+ *
+ * To make sure the audio signal doesn't contain noise, #audio_signal_detect
+ * checks that low frequencies have a power lower than #NOISE_THRESHOLD.
+ * However, if frequencies that are too low are generated, noise detection can
+ * fail.
+ *
+ * This value should be at least 100 Hz plus one bin. It is best not to change
+ * this value.
+ */
+#define MIN_FREQ 200 /* Hz */
+#define NOISE_THRESHOLD 0.0005
/**
* SECTION:igt_audio
@@ -51,7 +65,7 @@ struct audio_signal_freq {
int freq;
int channel;
- int16_t *period;
+ double *period;
size_t period_len;
int offset;
};
@@ -77,12 +91,12 @@ struct audio_signal *audio_signal_init(int channels, int sampling_rate)
{
struct audio_signal *signal;
- signal = malloc(sizeof(struct audio_signal));
- memset(signal, 0, sizeof(struct audio_signal));
+ igt_assert(channels > 0);
+ igt_assert(channels <= CHANNELS_MAX);
+ signal = calloc(1, sizeof(struct audio_signal));
signal->sampling_rate = sampling_rate;
signal->channels = channels;
-
return signal;
}
@@ -105,6 +119,7 @@ int audio_signal_add_frequency(struct audio_signal *signal, int frequency,
igt_assert(index < FREQS_MAX);
igt_assert(channel < signal->channels);
+ igt_assert(frequency >= MIN_FREQ);
/* Stay within the Nyquist–Shannon sampling theorem. */
if (frequency > signal->sampling_rate / 2) {
@@ -142,7 +157,7 @@ int audio_signal_add_frequency(struct audio_signal *signal, int frequency,
*/
void audio_signal_synthesize(struct audio_signal *signal)
{
- int16_t *period;
+ double *period;
double value;
size_t period_len;
int freq;
@@ -152,13 +167,13 @@ void audio_signal_synthesize(struct audio_signal *signal)
freq = signal->freqs[i].freq;
period_len = signal->sampling_rate / freq;
- period = calloc(1, period_len * sizeof(int16_t));
+ period = calloc(period_len, sizeof(double));
for (j = 0; j < period_len; j++) {
value = 2.0 * M_PI * freq / signal->sampling_rate * j;
- value = sin(value) * INT16_MAX / signal->freqs_count;
+ value = sin(value) * SYNTHESIZE_AMPLITUDE;
- period[j] = (int16_t) value;
+ period[j] = value;
}
signal->freqs[i].period = period;
@@ -195,6 +210,49 @@ void audio_signal_reset(struct audio_signal *signal)
signal->freqs_count = 0;
}
+static size_t audio_signal_count_freqs(struct audio_signal *signal, int channel)
+{
+ size_t n, i;
+ struct audio_signal_freq *freq;
+
+ n = 0;
+ for (i = 0; i < signal->freqs_count; i++) {
+ freq = &signal->freqs[i];
+ if (freq->channel < 0 || freq->channel == channel)
+ n++;
+ }
+
+ return n;
+}
+
+/** audio_sanity_check:
+ *
+ * Make sure our generated signal is not messed up. In particular, make sure
+ * the maximum reaches a reasonable value but doesn't exceed our
+ * SYNTHESIZE_AMPLITUDE limit. Same for the minimum.
+ *
+ * We want the signal to be powerful enough to be able to hear something. We
+ * want the signal not to reach 1.0 so that we're sure it won't get capped by
+ * the audio card or the receiver.
+ */
+static void audio_sanity_check(double *samples, size_t samples_len)
+{
+ size_t i;
+ double min = 0, max = 0;
+
+ for (i = 0; i < samples_len; i++) {
+ if (samples[i] < min)
+ min = samples[i];
+ if (samples[i] > max)
+ max = samples[i];
+ }
+
+ igt_assert(-SYNTHESIZE_AMPLITUDE <= min);
+ igt_assert(min <= -SYNTHESIZE_AMPLITUDE + SYNTHESIZE_ACCURACY);
+ igt_assert(SYNTHESIZE_AMPLITUDE - SYNTHESIZE_ACCURACY <= max);
+ igt_assert(max <= SYNTHESIZE_AMPLITUDE);
+}
+
/**
* audio_signal_fill:
* @signal: The target signal structure
@@ -202,19 +260,27 @@ void audio_signal_reset(struct audio_signal *signal)
* @samples: The number of samples to fill
*
* Fill the requested number of samples to the target buffer with the audio
- * signal data (in interleaved S16_LE format), at the requested sampling rate
+ * signal data (in interleaved double format), at the requested sampling rate
* and number of channels.
+ *
+ * Each sample is normalized (i.e. between -1 and 1).
*/
-void audio_signal_fill(struct audio_signal *signal, int16_t *buffer,
- size_t buffer_len)
+void audio_signal_fill(struct audio_signal *signal, double *buffer,
+ size_t samples)
{
- int16_t *destination, *source;
+ double *dst, *src;
struct audio_signal_freq *freq;
int total;
int count;
int i, j, k;
+ size_t freqs_per_channel[CHANNELS_MAX];
+
+ memset(buffer, 0, sizeof(double) * signal->channels * samples);
- memset(buffer, 0, sizeof(int16_t) * signal->channels * buffer_len);
+ for (i = 0; i < signal->channels; i++) {
+ freqs_per_channel[i] = audio_signal_count_freqs(signal, i);
+ igt_assert(freqs_per_channel[i] > 0);
+ }
for (i = 0; i < signal->freqs_count; i++) {
freq = &signal->freqs[i];
@@ -222,13 +288,13 @@ void audio_signal_fill(struct audio_signal *signal, int16_t *buffer,
igt_assert(freq->period);
- while (total < buffer_len) {
- source = freq->period + freq->offset;
- destination = buffer + total * signal->channels;
+ while (total < samples) {
+ src = freq->period + freq->offset;
+ dst = buffer + total * signal->channels;
count = freq->period_len - freq->offset;
- if (count > buffer_len - total)
- count = buffer_len - total;
+ if (count > samples - total)
+ count = samples - total;
freq->offset += count;
freq->offset %= freq->period_len;
@@ -238,24 +304,36 @@ void audio_signal_fill(struct audio_signal *signal, int16_t *buffer,
if (freq->channel >= 0 &&
freq->channel != k)
continue;
- destination[j * signal->channels + k] += source[j];
+ dst[j * signal->channels + k] +=
+ src[j] / freqs_per_channel[k];
}
}
total += count;
}
}
+
+ audio_sanity_check(buffer, signal->channels * samples);
+}
+
+/* See https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows */
+static double hann_window(double v, size_t i, size_t N)
+{
+ return v * 0.5 * (1 - cos(2.0 * M_PI * (double) i / (double) N));
}
/**
* Checks that frequencies specified in signal, and only those, are included
* in the input data.
*
- * sampling_rate is given in Hz. data_len is the number of elements in data.
+ * sampling_rate is given in Hz. samples_len is the number of elements in
+ * samples.
*/
bool audio_signal_detect(struct audio_signal *signal, int sampling_rate,
- int channel, double *data, size_t data_len)
+ int channel, const double *samples, size_t samples_len)
{
+ double *data;
+ size_t data_len = samples_len;
size_t bin_power_len = data_len / 2 + 1;
double bin_power[bin_power_len];
bool detected[FREQS_MAX];
@@ -264,15 +342,31 @@ bool audio_signal_detect(struct audio_signal *signal, int sampling_rate,
size_t i, j;
bool above, success;
+ /* gsl will mutate the array in-place, so make a copy */
+ data = malloc(samples_len * sizeof(double));
+ memcpy(data, samples, samples_len * sizeof(double));
+
+ /* Apply a Hann window to the input signal, to reduce frequency leaks
+ * due to the endpoints of the signal being discontinuous.
+ *
+ * For more info:
+ * - https://download.ni.com/evaluation/pxi/Understanding%20FFTs%20and%20Windowing.pdf
+ * - https://en.wikipedia.org/wiki/Window_function
+ */
+ for (i = 0; i < data_len; i++)
+ data[i] = hann_window(data[i], i, data_len);
+
/* Allowed error in Hz due to FFT step */
freq_accuracy = sampling_rate / data_len;
igt_debug("Allowed freq. error: %d Hz\n", freq_accuracy);
ret = gsl_fft_real_radix2_transform(data, 1, data_len);
- igt_assert(ret == 0);
+ if (ret != 0) {
+ free(data);
+ igt_assert(0);
+ }
- /* Compute the power received by every bin of the FFT, and record the
- * maximum power received as a way to normalize all the others.
+ /* Compute the power received by every bin of the FFT.
*
* For i < data_len / 2, the real part of the i-th term is stored at
* data[i] and its imaginary part is stored at data[data_len - i].
@@ -282,15 +376,36 @@ bool audio_signal_detect(struct audio_signal *signal, int sampling_rate,
* The power is encoded as the magnitude of the complex number and the
* phase is encoded as its angle.
*/
- max = 0;
bin_power[0] = data[0];
for (i = 1; i < bin_power_len - 1; i++) {
bin_power[i] = hypot(data[i], data[data_len - i]);
- if (bin_power[i] > max)
- max = bin_power[i];
}
bin_power[bin_power_len - 1] = data[data_len / 2];
+ /* Normalize the power */
+ for (i = 0; i < bin_power_len; i++)
+ bin_power[i] = 2 * bin_power[i] / data_len;
+
+ /* Detect noise with a threshold on the power of low frequencies */
+ for (i = 0; i < bin_power_len; i++) {
+ freq = sampling_rate * i / data_len;
+ if (freq > MIN_FREQ - 100)
+ break;
+ if (bin_power[i] > NOISE_THRESHOLD) {
+ igt_debug("Noise level too high: freq=%d power=%f\n",
+ freq, bin_power[i]);
+ free(data);
+ return false;
+ }
+ }
+
+ /* Record the maximum power received as a way to normalize all the
+ * others. */
+ max = NAN;
+ for (i = 0; i < bin_power_len; i++) {
+ if (isnan(max) || bin_power[i] > max)
+ max = bin_power[i];
+ }
+
for (i = 0; i < signal->freqs_count; i++)
detected[i] = false;
@@ -372,11 +487,19 @@ bool audio_signal_detect(struct audio_signal *signal, int sampling_rate,
}
}
+ free(data);
+
return success;
}
/**
- * Extracts a single channel from a multi-channel S32_LE input buffer.
+ * audio_extract_channel_s32_le: extracts a single channel from a multi-channel
+ * S32_LE input buffer.
+ *
+ * If dst_cap is zero, no copy is performed. This can be used to compute the
+ * minimum required capacity.
+ *
+ * Returns: the number of samples extracted.
*/
size_t audio_extract_channel_s32_le(double *dst, size_t dst_cap,
int32_t *src, size_t src_len,
@@ -387,13 +510,58 @@ size_t audio_extract_channel_s32_le(double *dst, size_t dst_cap,
igt_assert(channel < n_channels);
igt_assert(src_len % n_channels == 0);
dst_len = src_len / n_channels;
+ if (dst_cap == 0)
+ return dst_len;
+
igt_assert(dst_len <= dst_cap);
for (i = 0; i < dst_len; i++)
- dst[i] = (double) src[i * n_channels + channel];
+ dst[i] = (double) src[i * n_channels + channel] / INT32_MAX;
return dst_len;
}
+static void audio_convert_to_s16_le(int16_t *dst, double *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; ++i)
+ dst[i] = INT16_MAX * src[i];
+}
+
+static void audio_convert_to_s24_le(int32_t *dst, double *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; ++i)
+ dst[i] = 0x7FFFFF * src[i];
+}
+
+static void audio_convert_to_s32_le(int32_t *dst, double *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; ++i)
+ dst[i] = INT32_MAX * src[i];
+}
+
+void audio_convert_to(void *dst, double *src, size_t len,
+ snd_pcm_format_t format)
+{
+ switch (format) {
+ case SND_PCM_FORMAT_S16_LE:
+ audio_convert_to_s16_le(dst, src, len);
+ break;
+ case SND_PCM_FORMAT_S24_LE:
+ audio_convert_to_s24_le(dst, src, len);
+ break;
+ case SND_PCM_FORMAT_S32_LE:
+ audio_convert_to_s32_le(dst, src, len);
+ break;
+ default:
+ assert(false); /* unreachable */
+ }
+}
+
#define RIFF_TAG "RIFF"
#define WAVE_TAG "WAVE"
#define FMT_TAG "fmt "
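
To illustrate the new double-based signal path, here is a short sketch (editor's illustration, with arbitrary frequencies and buffer size) that synthesizes a stereo tone, fills a normalized buffer and converts it to S32_LE:

static void example_tone(void)
{
	struct audio_signal *signal = audio_signal_init(2, 48000);
	double samples[2 * 1024];	/* interleaved, normalized */
	int32_t out[2 * 1024];		/* interleaved S32_LE */

	audio_signal_add_frequency(signal, 1000, 0);	/* 1 kHz, channel 0 */
	audio_signal_add_frequency(signal, 2000, 1);	/* 2 kHz, channel 1 */
	audio_signal_synthesize(signal);

	audio_signal_fill(signal, samples, 1024);
	audio_convert_to(out, samples, 2 * 1024, SND_PCM_FORMAT_S32_LE);
}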
diff --git a/lib/igt_audio.h b/lib/igt_audio.h
index 466e772a..5c910c27 100644
--- a/lib/igt_audio.h
+++ b/lib/igt_audio.h
@@ -32,6 +32,8 @@
#include <stdbool.h>
#include <stdint.h>
+#include <alsa/asoundlib.h>
+
struct audio_signal;
struct audio_signal *audio_signal_init(int channels, int sampling_rate);
@@ -40,13 +42,15 @@ int audio_signal_add_frequency(struct audio_signal *signal, int frequency,
int channel);
void audio_signal_synthesize(struct audio_signal *signal);
void audio_signal_reset(struct audio_signal *signal);
-void audio_signal_fill(struct audio_signal *signal, int16_t *buffer,
- size_t buffer_len);
+void audio_signal_fill(struct audio_signal *signal, double *buffer,
+ size_t samples);
bool audio_signal_detect(struct audio_signal *signal, int sampling_rate,
- int channel, double *data, size_t data_len);
+ int channel, const double *samples, size_t samples_len);
size_t audio_extract_channel_s32_le(double *dst, size_t dst_cap,
int32_t *src, size_t src_len,
int n_channels, int channel);
+void audio_convert_to(void *dst, double *src, size_t len,
+ snd_pcm_format_t format);
int audio_create_wav_file_s32_le(const char *qualifier, uint32_t sample_rate,
uint16_t channels, char **path);
diff --git a/lib/igt_aux.c b/lib/igt_aux.c
index 1a70edcc..578f8579 100644
--- a/lib/igt_aux.c
+++ b/lib/igt_aux.c
@@ -49,6 +49,7 @@
#include <sys/utsname.h>
#include <termios.h>
#include <assert.h>
+#include <grp.h>
#include <proc/readproc.h>
#include <libudev.h>
@@ -957,13 +958,15 @@ int igt_get_autoresume_delay(enum igt_suspend_state state)
*/
void igt_drop_root(void)
{
- igt_assert(getuid() == 0);
+ igt_assert_eq(getuid(), 0);
- igt_assert(setgid(2) == 0);
- igt_assert(setuid(2) == 0);
+ igt_assert_eq(setgroups(0, NULL), 0);
+ igt_assert_eq(setgid(2), 0);
+ igt_assert_eq(setuid(2), 0);
- igt_assert(getgid() == 2);
- igt_assert(getuid() == 2);
+ igt_assert_eq(getgroups(0, NULL), 0);
+ igt_assert_eq(getgid(), 2);
+ igt_assert_eq(getuid(), 2);
}
/**
diff --git a/lib/igt_aux.h b/lib/igt_aux.h
index 67e8fbf4..04d22904 100644
--- a/lib/igt_aux.h
+++ b/lib/igt_aux.h
@@ -32,11 +32,20 @@
#include <stdbool.h>
#include <stddef.h>
#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#ifdef __linux__
+# include <sys/syscall.h>
+#endif
#include <i915/gem_submission.h>
/* signal interrupt helpers */
-#define gettid() syscall(__NR_gettid)
+#ifdef __linux__
+# ifndef HAVE_GETTID
+# define gettid() (pid_t)(syscall(__NR_gettid))
+# endif
+#endif
#define sigev_notify_thread_id _sigev_un._tid
/* auxiliary igt helpers from igt_aux.c */
diff --git a/lib/igt_chamelium.c b/lib/igt_chamelium.c
index ffc68f35..b83ff395 100644
--- a/lib/igt_chamelium.c
+++ b/lib/igt_chamelium.c
@@ -38,6 +38,7 @@
#include "igt_chamelium.h"
#include "igt_core.h"
#include "igt_aux.h"
+#include "igt_edid.h"
#include "igt_frame.h"
#include "igt_list.h"
#include "igt_kms.h"
@@ -117,7 +118,7 @@ struct chamelium {
int drm_fd;
- struct chamelium_edid *edids;
+ struct igt_list edids;
struct chamelium_port *ports;
int port_count;
};
@@ -265,14 +266,13 @@ static void *chamelium_fsm_mon(void *data)
return NULL;
}
-static xmlrpc_value *chamelium_rpc(struct chamelium *chamelium,
- struct chamelium_port *fsm_port,
- const char *method_name,
- const char *format_str,
- ...)
+static xmlrpc_value *__chamelium_rpc_va(struct chamelium *chamelium,
+ struct chamelium_port *fsm_port,
+ const char *method_name,
+ const char *format_str,
+ va_list va_args)
{
- xmlrpc_value *res;
- va_list va_args;
+ xmlrpc_value *res = NULL;
struct fsm_monitor_args monitor_args;
pthread_t fsm_thread_id;
@@ -296,17 +296,50 @@ static xmlrpc_value *chamelium_rpc(struct chamelium *chamelium,
&monitor_args);
}
- va_start(va_args, format_str);
xmlrpc_client_call2f_va(&chamelium->env, chamelium->client,
chamelium->url, method_name, format_str, &res,
va_args);
- va_end(va_args);
if (fsm_port) {
pthread_cancel(fsm_thread_id);
+ pthread_join(fsm_thread_id, NULL);
igt_cleanup_hotplug(monitor_args.mon);
}
+ return res;
+}
+
+static xmlrpc_value *__chamelium_rpc(struct chamelium *chamelium,
+ struct chamelium_port *fsm_port,
+ const char *method_name,
+ const char *format_str,
+ ...)
+{
+ xmlrpc_value *res;
+ va_list va_args;
+
+ va_start(va_args, format_str);
+ res = __chamelium_rpc_va(chamelium, fsm_port, method_name,
+ format_str, va_args);
+ va_end(va_args);
+
+ return res;
+}
+
+static xmlrpc_value *chamelium_rpc(struct chamelium *chamelium,
+ struct chamelium_port *fsm_port,
+ const char *method_name,
+ const char *format_str,
+ ...)
+{
+ xmlrpc_value *res;
+ va_list va_args;
+
+ va_start(va_args, format_str);
+ res = __chamelium_rpc_va(chamelium, fsm_port, method_name,
+ format_str, va_args);
+ va_end(va_args);
+
igt_assert_f(!chamelium->env.fault_occurred,
"Chamelium RPC call failed: %s\n",
chamelium->env.fault_string);
@@ -497,32 +530,30 @@ void chamelium_schedule_hpd_toggle(struct chamelium *chamelium,
* Uploads and registers a new EDID with the chamelium. The EDID will be
* destroyed automatically when #chamelium_deinit is called.
*
- * Returns: The ID of the EDID uploaded to the chamelium.
+ * Returns: An opaque pointer to the Chamelium EDID
*/
-int chamelium_new_edid(struct chamelium *chamelium, const unsigned char *edid)
+struct chamelium_edid *chamelium_new_edid(struct chamelium *chamelium,
+ const unsigned char *raw_edid)
{
xmlrpc_value *res;
- struct chamelium_edid *allocated_edid;
+ struct chamelium_edid *chamelium_edid;
int edid_id;
+ struct edid *edid = (struct edid *) raw_edid;
+ size_t edid_size = sizeof(struct edid) +
+ edid->extensions_len * sizeof(struct edid_ext);
res = chamelium_rpc(chamelium, NULL, "CreateEdid", "(6)",
- edid, EDID_LENGTH);
+ raw_edid, edid_size);
xmlrpc_read_int(&chamelium->env, res, &edid_id);
xmlrpc_DECREF(res);
- allocated_edid = malloc(sizeof(struct chamelium_edid));
- memset(allocated_edid, 0, sizeof(*allocated_edid));
-
- allocated_edid->id = edid_id;
- igt_list_init(&allocated_edid->link);
+ chamelium_edid = calloc(1, sizeof(struct chamelium_edid));
+ chamelium_edid->id = edid_id;
- if (chamelium->edids)
- igt_list_add(&chamelium->edids->link, &allocated_edid->link);
- else
- chamelium->edids = allocated_edid;
+ igt_list_add(&chamelium_edid->link, &chamelium->edids);
- return edid_id;
+ return chamelium_edid;
}
static void chamelium_destroy_edid(struct chamelium *chamelium, int edid_id)
@@ -535,7 +566,7 @@ static void chamelium_destroy_edid(struct chamelium *chamelium, int edid_id)
* chamelium_port_set_edid:
* @chamelium: The Chamelium instance to use
* @port: The port on the Chamelium to set the EDID on
- * @edid_id: The ID of an EDID on the chamelium created with
+ * @edid: The Chamelium EDID to set, created with
* #chamelium_new_edid, or 0 to disable the EDID on the port
*
* Sets a port on the chamelium to use the specified EDID. This does not fire a
@@ -545,10 +576,11 @@ static void chamelium_destroy_edid(struct chamelium *chamelium, int edid_id)
* change.
*/
void chamelium_port_set_edid(struct chamelium *chamelium,
- struct chamelium_port *port, int edid_id)
+ struct chamelium_port *port,
+ struct chamelium_edid *edid)
{
xmlrpc_DECREF(chamelium_rpc(chamelium, NULL, "ApplyEdid", "(ii)",
- port->id, edid_id));
+ port->id, edid->id));
}
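
With the opaque handle, the typical call sequence becomes the sketch below; obtaining the chamelium and port is assumed, and igt_kms_get_base_edid() is used purely for illustration:

	struct chamelium_edid *edid;

	edid = chamelium_new_edid(chamelium, igt_kms_get_base_edid());
	chamelium_port_set_edid(chamelium, port, edid);
	/* the EDID is destroyed automatically by chamelium_deinit() */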
/**
@@ -931,6 +963,43 @@ int chamelium_get_captured_frame_count(struct chamelium *chamelium)
}
/**
+ * chamelium_supports_get_audio_format: check whether the Chamelium device
+ * supports retrieving the capture audio format.
+ */
+static bool chamelium_supports_get_audio_format(struct chamelium *chamelium)
+{
+ xmlrpc_value *res;
+
+ res = __chamelium_rpc(chamelium, NULL, "GetAudioFormat", "(i)", 3);
+ if (res)
+ xmlrpc_DECREF(res);
+
+ /* XML-RPC has a special code for unsupported methods
+ * (XMLRPC_NO_SUCH_METHOD_ERROR); however, the Chamelium implementation
+ * doesn't return it. */
+ return (!chamelium->env.fault_occurred ||
+ strstr(chamelium->env.fault_string, "not supported") == NULL);
+}
+
+bool chamelium_has_audio_support(struct chamelium *chamelium,
+ struct chamelium_port *port)
+{
+ xmlrpc_value *res;
+ xmlrpc_bool has_support;
+
+ if (!chamelium_supports_get_audio_format(chamelium)) {
+ igt_debug("The Chamelium device doesn't support GetAudioFormat\n");
+ return false;
+ }
+
+ res = chamelium_rpc(chamelium, port, "HasAudioSupport", "(i)", port->id);
+ xmlrpc_read_bool(&chamelium->env, res, &has_support);
+ xmlrpc_DECREF(res);
+
+ return has_support;
+}
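
A test would typically gate on audio support before querying the capture setup, roughly as in this sketch (the chamelium and port variables are assumed to exist):

	int mapping[CHAMELIUM_MAX_AUDIO_CHANNELS];
	int rate, channels;

	igt_require(chamelium_has_audio_support(chamelium, port));
	chamelium_get_audio_channel_mapping(chamelium, port, mapping);
	chamelium_get_audio_format(chamelium, port, &rate, &channels);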
+
+/**
* chamelium_get_audio_channel_mapping:
* @chamelium: the Chamelium instance
* @port: the audio port
@@ -947,7 +1016,7 @@ int chamelium_get_captured_frame_count(struct chamelium *chamelium)
*/
void chamelium_get_audio_channel_mapping(struct chamelium *chamelium,
struct chamelium_port *port,
- int mapping[static 8])
+ int mapping[static CHAMELIUM_MAX_AUDIO_CHANNELS])
{
xmlrpc_value *res, *res_channel;
int res_len, i;
@@ -955,7 +1024,7 @@ void chamelium_get_audio_channel_mapping(struct chamelium *chamelium,
res = chamelium_rpc(chamelium, port, "GetAudioChannelMapping", "(i)",
port->id);
res_len = xmlrpc_array_size(&chamelium->env, res);
- igt_assert(res_len == 8);
+ igt_assert(res_len == CHAMELIUM_MAX_AUDIO_CHANNELS);
for (i = 0; i < res_len; i++) {
xmlrpc_array_read_item(&chamelium->env, res, i, &res_channel);
xmlrpc_read_int(&chamelium->env, res_channel, &mapping[i]);
@@ -985,8 +1054,10 @@ static void audio_format_from_xml(struct chamelium *chamelium,
if (rate)
xmlrpc_read_int(&chamelium->env, res_rate, rate);
- if (channels)
+ if (channels) {
xmlrpc_read_int(&chamelium->env, res_channel, channels);
+ igt_assert(*channels <= CHAMELIUM_MAX_AUDIO_CHANNELS);
+ }
xmlrpc_DECREF(res_channel);
xmlrpc_DECREF(res_sample_format);
@@ -1774,6 +1845,7 @@ struct chamelium *chamelium_init(int drm_fd)
memset(chamelium, 0, sizeof(*chamelium));
chamelium->drm_fd = drm_fd;
+ igt_list_init(&chamelium->edids);
/* Setup the libxmlrpc context */
xmlrpc_env_init(&chamelium->env);
@@ -1825,7 +1897,7 @@ void chamelium_deinit(struct chamelium *chamelium)
chamelium_plug(chamelium, &chamelium->ports[i]);
/* Destroy any EDIDs we created to make sure we don't leak them */
- igt_list_for_each_safe(pos, tmp, &chamelium->edids->link, link) {
+ igt_list_for_each_safe(pos, tmp, &chamelium->edids, link) {
chamelium_destroy_edid(chamelium, pos->id);
free(pos);
}
diff --git a/lib/igt_chamelium.h b/lib/igt_chamelium.h
index f47b84cb..ce9e9ced 100644
--- a/lib/igt_chamelium.h
+++ b/lib/igt_chamelium.h
@@ -59,6 +59,20 @@ struct chamelium_audio_file {
int channels;
};
+struct chamelium_edid;
+
+/**
+ * CHAMELIUM_DEFAULT_EDID: pass this value to #chamelium_port_set_edid to use
+ * the default EDID.
+ */
+#define CHAMELIUM_DEFAULT_EDID 0
+
+/**
+ * CHAMELIUM_MAX_AUDIO_CHANNELS: the maximum number of audio capture channels
+ * supported by Chamelium.
+ */
+#define CHAMELIUM_MAX_AUDIO_CHANNELS 8
+
struct chamelium *chamelium_init(int drm_fd);
void chamelium_deinit(struct chamelium *chamelium);
void chamelium_reset(struct chamelium *chamelium);
@@ -86,9 +100,11 @@ void chamelium_fire_hpd_pulses(struct chamelium *chamelium,
void chamelium_schedule_hpd_toggle(struct chamelium *chamelium,
struct chamelium_port *port, int delay_ms,
bool rising_edge);
-int chamelium_new_edid(struct chamelium *chamelium, const unsigned char *edid);
+struct chamelium_edid *chamelium_new_edid(struct chamelium *chamelium,
+ const unsigned char *edid);
void chamelium_port_set_edid(struct chamelium *chamelium,
- struct chamelium_port *port, int edid_id);
+ struct chamelium_port *port,
+ struct chamelium_edid *edid);
bool chamelium_port_get_ddc_state(struct chamelium *chamelium,
struct chamelium_port *port);
void chamelium_port_set_ddc_state(struct chamelium *chamelium,
@@ -106,9 +122,11 @@ void chamelium_start_capture(struct chamelium *chamelium,
void chamelium_stop_capture(struct chamelium *chamelium, int frame_count);
void chamelium_capture(struct chamelium *chamelium, struct chamelium_port *port,
int x, int y, int w, int h, int frame_count);
+bool chamelium_has_audio_support(struct chamelium *chamelium,
+ struct chamelium_port *port);
void chamelium_get_audio_channel_mapping(struct chamelium *chamelium,
struct chamelium_port *port,
- int mapping[static 8]);
+ int mapping[static CHAMELIUM_MAX_AUDIO_CHANNELS]);
void chamelium_get_audio_format(struct chamelium *chamelium,
struct chamelium_port *port,
int *rate, int *channels);
diff --git a/lib/igt_core.c b/lib/igt_core.c
index 3141d923..6b9f0425 100644
--- a/lib/igt_core.c
+++ b/lib/igt_core.c
@@ -280,12 +280,16 @@ int test_children_sz;
bool test_child;
enum {
- OPT_LIST_SUBTESTS,
- OPT_RUN_SUBTEST,
- OPT_DESCRIPTION,
- OPT_DEBUG,
- OPT_INTERACTIVE_DEBUG,
- OPT_HELP = 'h'
+ /*
+ * Let the first values be used by individual tests so options don't
+ * conflict with core ones
+ */
+ OPT_LIST_SUBTESTS = 500,
+ OPT_RUN_SUBTEST,
+ OPT_DESCRIPTION,
+ OPT_DEBUG,
+ OPT_INTERACTIVE_DEBUG,
+ OPT_HELP = 'h'
};
static int igt_exitcode = IGT_EXIT_SUCCESS;
@@ -554,7 +558,7 @@ static void print_usage(const char *help_str, bool output_on_stderr)
" --debug[=log-domain]\n"
" --interactive-debug[=domain]\n"
" --help-description\n"
- " --help\n");
+ " --help|-h\n");
if (help_str)
fprintf(f, "%s\n", help_str);
}
@@ -666,16 +670,17 @@ static int common_init(int *argc, char **argv,
{
int c, option_index = 0, i, x;
static struct option long_options[] = {
- {"list-subtests", 0, 0, OPT_LIST_SUBTESTS},
- {"run-subtest", 1, 0, OPT_RUN_SUBTEST},
- {"help-description", 0, 0, OPT_DESCRIPTION},
- {"debug", optional_argument, 0, OPT_DEBUG},
- {"interactive-debug", optional_argument, 0, OPT_INTERACTIVE_DEBUG},
- {"help", 0, 0, OPT_HELP},
+ {"list-subtests", no_argument, NULL, OPT_LIST_SUBTESTS},
+ {"run-subtest", required_argument, NULL, OPT_RUN_SUBTEST},
+ {"help-description", no_argument, NULL, OPT_DESCRIPTION},
+ {"debug", optional_argument, NULL, OPT_DEBUG},
+ {"interactive-debug", optional_argument, NULL, OPT_INTERACTIVE_DEBUG},
+ {"help", no_argument, NULL, OPT_HELP},
{0, 0, 0, 0}
};
char *short_opts;
const char *std_short_opts = "h";
+ size_t std_short_opts_len = strlen(std_short_opts);
struct option *combined_opts;
int extra_opt_count;
int all_opt_count;
@@ -687,48 +692,58 @@ static int common_init(int *argc, char **argv,
if (strrchr(command_str, '/'))
command_str = strrchr(command_str, '/') + 1;
- /* First calculate space for all passed-in extra long options */
- all_opt_count = 0;
- while (extra_long_opts && extra_long_opts[all_opt_count].name) {
+ /* Check for conflicts and calculate space for passed-in extra long options */
+ for (extra_opt_count = 0; extra_long_opts && extra_long_opts[extra_opt_count].name; extra_opt_count++) {
+ char *conflicting_char;
/* check for conflicts with standard long option values */
- for (i = 0; long_options[i].name; i++)
- if (extra_long_opts[all_opt_count].val == long_options[i].val)
- igt_warn("Conflicting long option values between --%s and --%s\n",
- extra_long_opts[all_opt_count].name,
- long_options[i].name);
-
- /* check for conflicts with short options */
- if (extra_long_opts[all_opt_count].val != ':'
- && strchr(std_short_opts, extra_long_opts[all_opt_count].val)) {
- igt_warn("Conflicting long and short option values between --%s and -%s\n",
- extra_long_opts[all_opt_count].name,
- long_options[i].name);
- }
+ for (i = 0; long_options[i].name; i++) {
+ if (0 == strcmp(extra_long_opts[extra_opt_count].name, long_options[i].name)) {
+ igt_critical("Conflicting extra long option defined --%s\n", long_options[i].name);
+ assert(0);
+ }
+
+ if (extra_long_opts[extra_opt_count].val == long_options[i].val) {
+ igt_critical("Conflicting long option 'val' representation between --%s and --%s\n",
+ extra_long_opts[extra_opt_count].name,
+ long_options[i].name);
+ assert(0);
+ }
+ }
- all_opt_count++;
+ /* check for conflicts with standard short options */
+ if (extra_long_opts[extra_opt_count].val != ':'
+ && (conflicting_char = memchr(std_short_opts, extra_long_opts[extra_opt_count].val, std_short_opts_len))) {
+ igt_critical("Conflicting long and short option 'val' representation between --%s and -%c\n",
+ extra_long_opts[extra_opt_count].name,
+ *conflicting_char);
+ assert(0);
+ }
}
- extra_opt_count = all_opt_count;
/* check for conflicts in extra short options*/
for (i = 0; extra_short_opts && extra_short_opts[i]; i++) {
-
if (extra_short_opts[i] == ':')
continue;
/* check for conflicts with standard short options */
- if (strchr(std_short_opts, extra_short_opts[i]))
- igt_warn("Conflicting short option: -%c\n", std_short_opts[i]);
+ if (memchr(std_short_opts, extra_short_opts[i], std_short_opts_len)) {
+ igt_critical("Conflicting short option: -%c\n", std_short_opts[i]);
+ assert(0);
+ }
/* check for conflicts with standard long option values */
- for (x = 0; long_options[x].name; x++)
- if (long_options[x].val == extra_short_opts[i])
- igt_warn("Conflicting short option and long option value: --%s and -%c\n",
- long_options[x].name, extra_short_opts[i]);
+ for (x = 0; long_options[x].name; x++) {
+ if (long_options[x].val == extra_short_opts[i]) {
+ igt_critical("Conflicting short option and long option 'val' representation: --%s and -%c\n",
+ long_options[x].name, extra_short_opts[i]);
+ assert(0);
+ }
+ }
}
- all_opt_count += ARRAY_SIZE(long_options);
+ all_opt_count = extra_opt_count + ARRAY_SIZE(long_options);
combined_opts = malloc(all_opt_count * sizeof(*combined_opts));
if (extra_opt_count > 0)
@@ -847,8 +862,18 @@ out:
* additional knobs to tune when run manually, like the number of rounds to execute
* or the size of the allocated buffer objects.
*
- * Tests without special needs should just use igt_subtest_init() or use
- * #igt_main directly instead of their own main() function.
+ * Tests should use #igt_main_args instead of defining their own main()
+ * function and calling this function directly.
+ *
+ * The @help_str parameter is printed directly after the help text of
+ * standard arguments. The formatting of the string should be:
+ * - One line per option
+ * - Two spaces, option flag, tab character, help text, newline character
+ *
+ * Example: " -s\tBuffer size\n"
+ *
+ * The opt handler function must return #IGT_OPT_HANDLER_SUCCESS on
+ * successful handling, #IGT_OPT_HANDLER_ERROR on errors.
*
* Returns: Forwards any option parsing errors from getopt_long.
*/
@@ -881,7 +906,22 @@ enum igt_log_level igt_log_level = IGT_LOG_INFO;
* @handler_data: user data given to @extra_opt_handler when invoked
*
* This initializes a simple test without any support for subtests and allows
- * an arbitrary set of additional options.
+ * an arbitrary set of additional options. This is useful for tests which have
+ * additional knobs to tune when run manually, like the number of rounds to execute
+ * or the size of the allocated buffer objects.
+ *
+ * Tests should use #igt_simple_main_args instead of defining their own
+ * main() function and calling this function directly.
+ *
+ * The @help_str parameter is printed directly after the help text of
+ * standard arguments. The formatting of the string should be:
+ * - One line per option
+ * - Two spaces, option flag, tab character, help text, newline character
+ *
+ * Example: " -s\tBuffer size\n"
+ *
+ * The opt handler function must return #IGT_OPT_HANDLER_SUCCESS on
+ * successful handling, #IGT_OPT_HANDLER_ERROR on errors.
*/
void igt_simple_init_parse_opts(int *argc, char **argv,
const char *extra_short_opts,
@@ -903,9 +943,7 @@ bool __igt_run_subtest(const char *subtest_name)
{
int i;
- assert(!in_subtest);
- assert(!in_fixture);
- assert(test_with_subtests);
+ assert(!igt_can_fail());
/* check the subtest name only contains a-z, A-Z, 0-9, '-' and '_' */
for (i = 0; subtest_name[i] != '\0'; i++)
@@ -1733,6 +1771,7 @@ bool __igt_fork(void)
exit_handler_count = 0;
reset_helper_process_list();
oom_adjust_for_doom();
+ igt_unshare_spins();
return true;
default:
@@ -1992,7 +2031,7 @@ static void fatal_sig_handler(int sig)
#ifdef __linux__
/* Workaround cached PID and TID races on glibc and Bionic libc. */
pid_t pid = syscall(SYS_getpid);
- pid_t tid = syscall(SYS_gettid);
+ pid_t tid = gettid();
syscall(SYS_tgkill, pid, tid, sig);
#else
diff --git a/lib/igt_core.h b/lib/igt_core.h
index 47ffd9e7..88a95ec2 100644
--- a/lib/igt_core.h
+++ b/lib/igt_core.h
@@ -119,7 +119,7 @@ extern char *igt_frame_dump_path;
*
* Exit status indicating a test failure
*/
-#define IGT_EXIT_FAILURE 99
+#define IGT_EXIT_FAILURE 98
bool __igt_fixture(void);
void __igt_fixture_complete(void);
@@ -145,6 +145,8 @@ void __igt_fixture_end(void) __attribute__((noreturn));
/* subtest infrastructure */
jmp_buf igt_subtest_jmpbuf;
typedef int (*igt_opt_handler_t)(int opt, int opt_index, void *data);
+#define IGT_OPT_HANDLER_SUCCESS 0
+#define IGT_OPT_HANDLER_ERROR -2
#ifndef __GTK_DOC_IGNORE__ /* gtkdoc wants to document this forward decl */
struct option;
#endif
@@ -250,23 +252,38 @@ void __igt_subtest_group_restore(int);
__igt_subtest_group_restore(igt_tokencat(__save,__LINE__) ))
/**
- * igt_main:
+ * igt_main_args:
+ * @extra_short_opts: getopt_long() compliant list with additional short options
+ * @extra_long_opts: getopt_long() compliant list with additional long options
+ * @help_str: help string for the additional options
+ * @extra_opt_handler: handler for the additional options
+ * @handler_data: user data given to @extra_opt_handler when invoked
*
- * This is a magic control flow block used instead of a main() function for
- * tests with subtests. Open-coding the main() function is only recommended if
- * the test needs to parse additional command line arguments of its own.
+ * This is a magic control flow block used instead of a main()
+ * function for tests with subtests, along with custom command line
+ * arguments. The macro parameters are passed directly to
+ * #igt_subtest_init_parse_opts.
*/
-#define igt_main \
+#define igt_main_args(short_opts, long_opts, help_str, opt_handler, handler_data) \
static void igt_tokencat(__real_main, __LINE__)(void); \
int main(int argc, char **argv) { \
- igt_subtest_init_parse_opts(&argc, argv, NULL, NULL, NULL, \
- NULL, NULL); \
+ igt_subtest_init_parse_opts(&argc, argv, \
+ short_opts, long_opts, help_str, \
+ opt_handler, handler_data); \
igt_tokencat(__real_main, __LINE__)(); \
igt_exit(); \
} \
static void igt_tokencat(__real_main, __LINE__)(void) \
+/**
+ * igt_main:
+ *
+ * This is a magic control flow block used instead of a main() function for
+ * tests with subtests. Open-coding the main() function is not recommended.
+ */
+#define igt_main igt_main_args(NULL, NULL, NULL, NULL, NULL)
+
const char *igt_test_name(void);
void igt_simple_init_parse_opts(int *argc, char **argv,
const char *extra_short_opts,
@@ -289,23 +306,40 @@ void igt_simple_init_parse_opts(int *argc, char **argv,
#define igt_simple_init(argc, argv) \
igt_simple_init_parse_opts(&argc, argv, NULL, NULL, NULL, NULL, NULL);
+
/**
- * igt_simple_main:
+ * igt_simple_main_args:
+ * @extra_short_opts: getopt_long() compliant list with additional short options
+ * @extra_long_opts: getopt_long() compliant list with additional long options
+ * @help_str: help string for the additional options
+ * @extra_opt_handler: handler for the additional options
+ * @handler_data: user data given to @extra_opt_handler when invoked
*
- * This is a magic control flow block used instead of a main() function for
- * simple tests. Open-coding the main() function is only recommended if
- * the test needs to parse additional command line arguments of its own.
+ * This is a magic control flow block used instead of a main()
+ * function for simple tests with custom command line arguments. The
+ * macro parameters are passed directly to
+ * #igt_simple_init_parse_opts.
*/
-#define igt_simple_main \
+#define igt_simple_main_args(short_opts, long_opts, help_str, opt_handler, handler_data) \
static void igt_tokencat(__real_main, __LINE__)(void); \
int main(int argc, char **argv) { \
- igt_simple_init_parse_opts(&argc, argv, NULL, NULL, NULL, \
- NULL, NULL); \
+ igt_simple_init_parse_opts(&argc, argv, \
+ short_opts, long_opts, help_str, \
+ opt_handler, handler_data); \
igt_tokencat(__real_main, __LINE__)(); \
igt_exit(); \
} \
static void igt_tokencat(__real_main, __LINE__)(void) \
+
+/**
+ * igt_simple_main:
+ *
+ * This is a magic control flow block used instead of a main() function for
+ * simple tests. Open-coding the main() function is not recommended.
+ */
+#define igt_simple_main igt_simple_main_args(NULL, NULL, NULL, NULL, NULL)
+
/**
* igt_constructor:
*
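
For clarity, a minimal sketch of the new *_args variants in use; the -s option, its handler, buffer_size and the use of atoi()/optarg are made up for the example:

static int buffer_size = 4096;

static int opt_handler(int opt, int opt_index, void *data)
{
	switch (opt) {
	case 's':
		buffer_size = atoi(optarg);
		break;
	default:
		return IGT_OPT_HANDLER_ERROR;
	}

	return IGT_OPT_HANDLER_SUCCESS;
}

igt_main_args("s:", NULL, "  -s\tBuffer size\n", opt_handler, NULL)
{
	igt_subtest("example")
		igt_assert(buffer_size > 0);
}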
diff --git a/lib/igt_debugfs.c b/lib/igt_debugfs.c
index dd229c09..82ce1834 100644
--- a/lib/igt_debugfs.c
+++ b/lib/igt_debugfs.c
@@ -1145,7 +1145,7 @@ int igt_get_stable_obj_count(int driver)
return obj_count;
}
-void igt_debugfs_dump(int device, const char *filename)
+void __igt_debugfs_dump(int device, const char *filename, int level)
{
char *contents;
int dir;
@@ -1154,6 +1154,6 @@ void igt_debugfs_dump(int device, const char *filename)
contents = igt_sysfs_get(dir, filename);
close(dir);
- igt_debug("%s:\n%s\n", filename, contents);
+ igt_log(IGT_LOG_DOMAIN, level, "%s:\n%s\n", filename, contents);
free(contents);
}
diff --git a/lib/igt_debugfs.h b/lib/igt_debugfs.h
index f8e57a6b..52520b3c 100644
--- a/lib/igt_debugfs.h
+++ b/lib/igt_debugfs.h
@@ -201,6 +201,7 @@ void igt_enable_prefault(void);
* gem buffer objects
*/
int igt_get_stable_obj_count(int driver);
-void igt_debugfs_dump(int device, const char *filename);
+void __igt_debugfs_dump(int device, const char *filename, int level);
+#define igt_debugfs_dump(d, f) __igt_debugfs_dump(d, f, IGT_LOG_DEBUG)
#endif /* __IGT_DEBUGFS_H__ */
diff --git a/lib/igt_device.c b/lib/igt_device.c
index 08f39c8b..9469e5de 100644
--- a/lib/igt_device.c
+++ b/lib/igt_device.c
@@ -32,13 +32,20 @@ int __igt_device_set_master(int fd)
int err;
err = 0;
- if (drmIoctl(fd, DRM_IOCTL_SET_MASTER, NULL))
+ if (drmIoctl(fd, DRM_IOCTL_SET_MASTER, NULL)) {
err = -errno;
+ igt_assume(err);
+ }
errno = 0;
return err;
}
+static void show_clients(int fd)
+{
+ __igt_debugfs_dump(fd, "clients", IGT_LOG_WARN);
+}
+
/**
* igt_device_set_master: Set the device fd to be DRM master
* @fd: the device
@@ -48,7 +55,7 @@ int __igt_device_set_master(int fd)
void igt_device_set_master(int fd)
{
if (__igt_device_set_master(fd)) {
- igt_debugfs_dump(fd, "clients");
+ show_clients(fd);
igt_require_f(__igt_device_set_master(fd) == 0,
"Can't become DRM master, "
"please check if no other DRM client is running.\n");
@@ -60,8 +67,10 @@ int __igt_device_drop_master(int fd)
int err;
err = 0;
- if (drmIoctl(fd, DRM_IOCTL_DROP_MASTER, NULL))
+ if (drmIoctl(fd, DRM_IOCTL_DROP_MASTER, NULL)) {
err = -errno;
+ igt_assume(err);
+ }
errno = 0;
return err;
@@ -81,7 +90,7 @@ void igt_device_drop_master(int fd)
return;
if (__igt_device_drop_master(fd)) {
- igt_debugfs_dump(fd, "clients");
+ show_clients(fd);
igt_assert_f(__igt_device_drop_master(fd) == 0,
"Failed to drop DRM master.\n");
}
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 15d64fad..0e06276a 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -39,6 +39,7 @@
#include "ioctl_wrappers.h"
#include "sw_sync.h"
#include "igt_vgem.h"
+#include "i915/gem_engine_topology.h"
#include "i915/gem_mman.h"
/**
@@ -72,12 +73,12 @@ emit_recursive_batch(igt_spin_t *spin,
int fd, const struct igt_spin_factory *opts)
{
#define SCRATCH 0
-#define BATCH 1
+#define BATCH IGT_SPIN_BATCH
const int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_relocation_entry relocs[2], *r;
struct drm_i915_gem_execbuffer2 *execbuf;
struct drm_i915_gem_exec_object2 *obj;
- unsigned int engines[16];
+ unsigned int flags[GEM_MAX_ENGINES];
unsigned int nengine;
int fence_fd = -1;
uint32_t *cs, *batch;
@@ -85,17 +86,17 @@ emit_recursive_batch(igt_spin_t *spin,
nengine = 0;
if (opts->engine == ALL_ENGINES) {
- unsigned int engine;
+ struct intel_execution_engine2 *engine;
- for_each_physical_engine(fd, engine) {
+ for_each_context_engine(fd, opts->ctx, engine) {
if (opts->flags & IGT_SPIN_POLL_RUN &&
- !gem_can_store_dword(fd, engine))
+ !gem_class_can_store_dword(fd, engine->class))
continue;
- engines[nengine++] = engine;
+ flags[nengine++] = engine->flags;
}
} else {
- engines[nengine++] = opts->engine;
+ flags[nengine++] = opts->engine;
}
igt_require(nengine);
@@ -237,7 +238,7 @@ emit_recursive_batch(igt_spin_t *spin,
for (i = 0; i < nengine; i++) {
execbuf->flags &= ~ENGINE_MASK;
- execbuf->flags |= engines[i];
+ execbuf->flags |= flags[i];
gem_execbuf_wr(fd, execbuf);
@@ -261,12 +262,11 @@ emit_recursive_batch(igt_spin_t *spin,
igt_assert_lt(cs - batch, BATCH_SIZE / sizeof(*cs));
/* Make it easier for callers to resubmit. */
-
- obj[BATCH].relocation_count = 0;
- obj[BATCH].relocs_ptr = 0;
-
- obj[SCRATCH].flags = EXEC_OBJECT_PINNED;
- obj[BATCH].flags = EXEC_OBJECT_PINNED;
+ for (i = 0; i < ARRAY_SIZE(spin->obj); i++) {
+ spin->obj[i].relocation_count = 0;
+ spin->obj[i].relocs_ptr = 0;
+ spin->obj[i].flags = EXEC_OBJECT_PINNED;
+ }
spin->cmd_precondition = *spin->condition;
@@ -316,9 +316,19 @@ igt_spin_factory(int fd, const struct igt_spin_factory *opts)
igt_require_gem(fd);
if (opts->engine != ALL_ENGINES) {
- gem_require_ring(fd, opts->engine);
+ struct intel_execution_engine2 e;
+ int class;
+
+ if (!gem_context_lookup_engine(fd, opts->engine,
+ opts->ctx, &e)) {
+ class = e.class;
+ } else {
+ gem_require_ring(fd, opts->engine);
+ class = gem_execbuf_flags_to_engine_class(opts->engine);
+ }
+
if (opts->flags & IGT_SPIN_POLL_RUN)
- igt_require(gem_can_store_dword(fd, opts->engine));
+ igt_require(gem_class_can_store_dword(fd, class));
}
spin = spin_create(fd, opts);
@@ -453,6 +463,16 @@ void igt_terminate_spins(void)
pthread_mutex_unlock(&list_lock);
}
+void igt_unshare_spins(void)
+{
+ struct igt_spin *it, *n;
+
+ /* Disable the automatic termination on inherited spinners */
+ igt_list_for_each_safe(it, n, &spin_list, link)
+ igt_list_init(&it->link);
+ igt_list_init(&spin_list);
+}
+
static uint32_t plug_vgem_handle(struct igt_cork *cork, int fd)
{
struct vgem_bo bo;
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index 61a9f2fc..bb25751a 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -42,6 +42,7 @@ typedef struct igt_spin {
int out_fence;
struct drm_i915_gem_exec_object2 obj[2];
+#define IGT_SPIN_BATCH 1
struct drm_i915_gem_execbuffer2 execbuf;
uint32_t poll_handle;
uint32_t *poll;
@@ -92,6 +93,7 @@ static inline void igt_spin_busywait_until_started(igt_spin_t *spin)
}
void igt_terminate_spins(void);
+void igt_unshare_spins(void);
enum igt_cork_type {
CORK_SYNC_FD = 1,
diff --git a/lib/igt_edid.c b/lib/igt_edid.c
index 52e66ab2..e71136f4 100644
--- a/lib/igt_edid.c
+++ b/lib/igt_edid.c
@@ -110,7 +110,7 @@ void detailed_timing_set_mode(struct detailed_timing *dt, drmModeModeInfo *mode,
pt->width_height_mm_hi = (width_mm & 0xF00) >> 4
| (height_mm & 0xF00) >> 8;
- pt->misc = 0;
+ pt->misc = EDID_PT_SYNC_DIGITAL_SEPARATE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
pt->misc |= EDID_PT_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -252,19 +252,128 @@ void edid_init_with_mode(struct edid *edid, drmModeModeInfo *mode)
EDID_DETAIL_MONITOR_NAME, "IGT");
}
+static uint8_t compute_checksum(const uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint8_t sum = 0;
+
+ assert(size > 0);
+ for (i = 0; i < size - 1; i++) {
+ sum += buf[i];
+ }
+
+ return 256 - sum;
+}
+
/**
* edid_update_checksum: compute and update the EDID checksum
*/
void edid_update_checksum(struct edid *edid)
{
- size_t i;
- const uint8_t *buf = (const uint8_t *) edid;
- uint8_t sum = 0;
+ edid->checksum = compute_checksum((uint8_t *) edid,
+ sizeof(struct edid));
+}
- /* calculate checksum */
- for (i = 0; i < sizeof(struct edid) - 1; i++) {
- sum = sum + buf[i];
- }
+/**
+ * cea_sad_init_pcm:
+ * @channels: the number of supported channels (max. 8)
+ * @sampling_rates: bitfield of enum cea_sad_sampling_rate
+ * @sample_sizes: bitfield of enum cea_sad_pcm_sample_size
+ *
+ * Initialize a Short Audio Descriptor to advertise PCM support.
+ */
+void cea_sad_init_pcm(struct cea_sad *sad, int channels,
+ uint8_t sampling_rates, uint8_t sample_sizes)
+{
+ assert(channels <= 8);
+ sad->format_channels = CEA_SAD_FORMAT_PCM << 3 | (channels - 1);
+ sad->sampling_rates = sampling_rates;
+ sad->bitrate = sample_sizes;
+}
+
+/**
+ * cea_vsd_get_hdmi_default:
+ *
+ * Returns the default Vendor Specific Data block for HDMI.
+ */
+const struct cea_vsd *cea_vsd_get_hdmi_default(size_t *size)
+{
+ static char raw[sizeof(struct cea_vsd) + 4] = {0};
+ struct cea_vsd *vsd;
+
+ *size = sizeof(raw);
+
+ /* HDMI Licensing LLC IEEE OUI (0x000C03, stored LSB first), followed by
+ * the source physical address and HDMI capability fields (deep color
+ * support, maximum TMDS clock). */
+ vsd = (struct cea_vsd *) raw;
+ vsd->ieee_oui[0] = 0x03;
+ vsd->ieee_oui[1] = 0x0C;
+ vsd->ieee_oui[2] = 0x00;
+ vsd->data[0] = 0x10;
+ vsd->data[1] = 0x00;
+ vsd->data[2] = 0x38;
+ vsd->data[3] = 0x2D;
+
+ return vsd;
+}
+
+static void edid_cea_data_block_init(struct edid_cea_data_block *block,
+ enum edid_cea_data_type type, size_t size)
+{
+ assert(size <= 0xFF);
+ block->type_len = type << 5 | size;
+}
+
+size_t edid_cea_data_block_set_sad(struct edid_cea_data_block *block,
+ const struct cea_sad *sads, size_t sads_len)
+{
+ size_t sads_size;
+
+ sads_size = sizeof(struct cea_sad) * sads_len;
+ edid_cea_data_block_init(block, EDID_CEA_DATA_AUDIO, sads_size);
+
+ memcpy(block->data.sads, sads, sads_size);
+
+ return sizeof(struct edid_cea_data_block) + sads_size;
+}
+
+size_t edid_cea_data_block_set_vsd(struct edid_cea_data_block *block,
+ const struct cea_vsd *vsd, size_t vsd_size)
+{
+ edid_cea_data_block_init(block, EDID_CEA_DATA_VENDOR_SPECIFIC,
+ vsd_size);
+
+ memcpy(block->data.vsds, vsd, vsd_size);
+
+ return sizeof(struct edid_cea_data_block) + vsd_size;
+}
+
+size_t edid_cea_data_block_set_speaker_alloc(struct edid_cea_data_block *block,
+ const struct cea_speaker_alloc *speakers)
+{
+ size_t size;
+
+ size = sizeof(struct cea_speaker_alloc);
+ edid_cea_data_block_init(block, EDID_CEA_DATA_SPEAKER_ALLOC, size);
+ memcpy(block->data.speakers, speakers, size);
+
+ return sizeof(struct edid_cea_data_block) + size;
+}
- edid->checksum = 256 - sum;
+void edid_ext_set_cea(struct edid_ext *ext, size_t data_blocks_size,
+ uint8_t flags)
+{
+ struct edid_cea *cea = &ext->data.cea;
+
+ ext->tag = EDID_EXT_CEA;
+
+ cea->revision = 3;
+ cea->dtd_start = 4 + data_blocks_size;
+ cea->misc = flags; /* just flags, no DTD */
+}
+
+void edid_ext_update_cea_checksum(struct edid_ext *ext)
+{
+ ext->data.cea.checksum = compute_checksum((uint8_t *) ext,
+ sizeof(struct edid_ext));
}
diff --git a/lib/igt_edid.h b/lib/igt_edid.h
index bbcb939a..00596ef1 100644
--- a/lib/igt_edid.h
+++ b/lib/igt_edid.h
@@ -30,6 +30,8 @@
#include <stdint.h>
+#include <xf86drmMode.h>
+
struct est_timings {
uint8_t t1;
uint8_t t2;
@@ -52,11 +54,17 @@ struct std_timing {
#define DETAILED_TIMINGS_LEN 4
-#define EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define EDID_PT_INTERLACED (1 << 7)
+#define EDID_PT_STEREO (1 << 5)
+
+/* Sync type */
+#define EDID_PT_SYNC_ANALOG (0b00 << 3)
+#define EDID_PT_SYNC_DIGITAL_COMPOSITE (0b10 << 3)
+#define EDID_PT_SYNC_DIGITAL_SEPARATE (0b11 << 3)
+
+/* Applies to EDID_PT_SYNC_DIGITAL_SEPARATE only */
#define EDID_PT_VSYNC_POSITIVE (1 << 2)
-#define EDID_PT_SEPARATE_SYNC (3 << 3)
-#define EDID_PT_STEREO (1 << 5)
-#define EDID_PT_INTERLACED (1 << 7)
+#define EDID_PT_HSYNC_POSITIVE (1 << 1)
struct detailed_pixel_timing {
uint8_t hactive_lo;
@@ -74,7 +82,7 @@ struct detailed_pixel_timing {
uint8_t width_height_mm_hi;
uint8_t hborder;
uint8_t vborder;
- uint8_t misc;
+ uint8_t misc; /* EDID_PT_* */
} __attribute__((packed));
struct detailed_data_string {
@@ -142,6 +150,110 @@ struct detailed_timing {
} data;
} __attribute__((packed));
+enum cea_sad_format {
+ CEA_SAD_FORMAT_PCM = 1,
+ CEA_SAD_FORMAT_AC3 = 2,
+ CEA_SAD_FORMAT_MPEG1 = 3, /* Layers 1 & 2 */
+ CEA_SAD_FORMAT_MP3 = 4,
+ CEA_SAD_FORMAT_MPEG2 = 5,
+ CEA_SAD_FORMAT_AAC = 6,
+ CEA_SAD_FORMAT_DTS = 7,
+ CEA_SAD_FORMAT_ATRAC = 8,
+ CEA_SAD_FORMAT_SACD = 9, /* One-bit audio */
+ CEA_SAD_FORMAT_DD_PLUS = 10,
+ CEA_SAD_FORMAT_DTS_HD = 11,
+ CEA_SAD_FORMAT_DOLBY = 12, /* MLP/Dolby TrueHD */
+ CEA_SAD_FORMAT_DST = 13,
+ CEA_SAD_FORMAT_WMA = 14, /* Microsoft WMA Pro */
+};
+
+enum cea_sad_sampling_rate {
+ CEA_SAD_SAMPLING_RATE_32KHZ = 1 << 0,
+ CEA_SAD_SAMPLING_RATE_44KHZ = 1 << 1,
+ CEA_SAD_SAMPLING_RATE_48KHZ = 1 << 2,
+ CEA_SAD_SAMPLING_RATE_88KHZ = 1 << 3,
+ CEA_SAD_SAMPLING_RATE_96KHZ = 1 << 4,
+ CEA_SAD_SAMPLING_RATE_176KHZ = 1 << 5,
+ CEA_SAD_SAMPLING_RATE_192KHZ = 1 << 6,
+};
+
+/* for PCM only */
+enum cea_sad_pcm_sample_size {
+ CEA_SAD_SAMPLE_SIZE_16 = 1 << 0,
+ CEA_SAD_SAMPLE_SIZE_20 = 1 << 1,
+ CEA_SAD_SAMPLE_SIZE_24 = 1 << 2,
+};
+
+/* Short Audio Descriptor */
+struct cea_sad {
+ uint8_t format_channels;
+ uint8_t sampling_rates;
+ uint8_t bitrate;
+} __attribute__((packed));
+
+/* Vendor Specific Data */
+struct cea_vsd {
+ uint8_t ieee_oui[3];
+ char data[];
+};
+
+enum cea_speaker_alloc_item {
+ CEA_SPEAKER_FRONT_LEFT_RIGHT = 1 << 0,
+ CEA_SPEAKER_LFE = 1 << 1,
+ CEA_SPEAKER_FRONT_CENTER = 1 << 2,
+ CEA_SPEAKER_REAR_LEFT_RIGHT = 1 << 3,
+ CEA_SPEAKER_REAR_CENTER = 1 << 4,
+ CEA_SPEAKER_FRONT_LEFT_RIGHT_CENTER = 1 << 5,
+ CEA_SPEAKER_REAR_LEFT_RIGHT_CENTER = 1 << 6,
+};
+
+struct cea_speaker_alloc {
+ uint8_t speakers; /* enum cea_speaker_alloc_item */
+ uint8_t reserved[2];
+} __attribute__((packed));
+
+enum edid_cea_data_type {
+ EDID_CEA_DATA_AUDIO = 1,
+ EDID_CEA_DATA_VIDEO = 2,
+ EDID_CEA_DATA_VENDOR_SPECIFIC = 3,
+ EDID_CEA_DATA_SPEAKER_ALLOC = 4,
+};
+
+struct edid_cea_data_block {
+ uint8_t type_len; /* type is from enum edid_cea_data_type */
+ union {
+ struct cea_sad sads[0];
+ struct cea_vsd vsds[0];
+ struct cea_speaker_alloc speakers[0];
+ } data;
+} __attribute__((packed));
+
+enum edid_cea_flag {
+ EDID_CEA_YCBCR422 = 1 << 4,
+ EDID_CEA_YCBCR444 = 1 << 5,
+ EDID_CEA_BASIC_AUDIO = 1 << 6,
+ EDID_CEA_UNDERSCAN = 1 << 7,
+};
+
+struct edid_cea {
+ uint8_t revision;
+ uint8_t dtd_start;
+ uint8_t misc;
+ char data[123]; /* DBC & DTD collection, padded with zeros */
+ uint8_t checksum;
+} __attribute__((packed));
+
+enum edid_ext_tag {
+ EDID_EXT_CEA = 0x02,
+};
+
+struct edid_ext {
+ uint8_t tag; /* enum edid_ext_tag */
+ union {
+ struct edid_cea cea;
+ } data;
+} __attribute__((packed));
+
struct edid {
char header[8];
/* Vendor & product info */
@@ -177,9 +289,9 @@ struct edid {
/* Detailing timings 1-4 */
struct detailed_timing detailed_timings[DETAILED_TIMINGS_LEN];
/* Number of 128 byte ext. blocks */
- uint8_t extensions;
- /* Checksum */
+ uint8_t extensions_len;
uint8_t checksum;
+ struct edid_ext extensions[];
} __attribute__((packed));
void edid_init(struct edid *edid);
@@ -193,4 +305,17 @@ void detailed_timing_set_string(struct detailed_timing *dt,
enum detailed_non_pixel_type type,
const char *str);
+void cea_sad_init_pcm(struct cea_sad *sad, int channels,
+ uint8_t sampling_rates, uint8_t sample_sizes);
+void edid_ext_update_cea_checksum(struct edid_ext *ext);
+const struct cea_vsd *cea_vsd_get_hdmi_default(size_t *size);
+size_t edid_cea_data_block_set_sad(struct edid_cea_data_block *block,
+ const struct cea_sad *sads, size_t sads_len);
+size_t edid_cea_data_block_set_vsd(struct edid_cea_data_block *block,
+ const struct cea_vsd *vsd, size_t vsd_size);
+size_t edid_cea_data_block_set_speaker_alloc(struct edid_cea_data_block *block,
+ const struct cea_speaker_alloc *speakers);
+void edid_ext_set_cea(struct edid_ext *ext, size_t data_blocks_size,
+ uint8_t flags);
+
#endif
diff --git a/lib/igt_eld.c b/lib/igt_eld.c
new file mode 100644
index 00000000..3d7fd4dd
--- /dev/null
+++ b/lib/igt_eld.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors: Simon Ser <simon.ser@intel.com>
+ */
+
+#include "config.h"
+
+#include <dirent.h>
+#include <limits.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "igt_core.h"
+#include "igt_eld.h"
+
+#define ELD_PREFIX "eld#"
+#define ELD_DELIM " \t"
+
+/**
+ * EDID-Like Data (ELD) is metadata parsed and exposed by ALSA for HDMI and
+ * DisplayPort connectors supporting audio. This includes the monitor name and
+ * the supported audio parameters (formats, sampling rates, sample sizes and so
+ * on).
+ *
+ * Audio parameters come from Short Audio Descriptors (SAD) blocks in the
+ * EDID. The enumerations from igt_edid are reused here, since the ELD values
+ * match the EDID ones.
+ */
+
+static enum cea_sad_format parse_sad_coding_type(const char *value)
+{
+ if (strcmp(value, "LPCM") == 0)
+ return CEA_SAD_FORMAT_PCM;
+ else
+ return 0;
+}
+
+static enum cea_sad_sampling_rate parse_sad_rate(const char *value)
+{
+ switch (atoi(value)) {
+ case 32000:
+ return CEA_SAD_SAMPLING_RATE_32KHZ;
+ case 44100:
+ return CEA_SAD_SAMPLING_RATE_44KHZ;
+ case 48000:
+ return CEA_SAD_SAMPLING_RATE_48KHZ;
+ case 88000:
+ return CEA_SAD_SAMPLING_RATE_88KHZ;
+ case 96000:
+ return CEA_SAD_SAMPLING_RATE_96KHZ;
+ case 176000:
+ return CEA_SAD_SAMPLING_RATE_176KHZ;
+ case 192000:
+ return CEA_SAD_SAMPLING_RATE_192KHZ;
+ default:
+ return 0;
+ }
+}
+
+static enum cea_sad_pcm_sample_size parse_sad_bit(const char *value)
+{
+ switch (atoi(value)) {
+ case 16:
+ return CEA_SAD_SAMPLE_SIZE_16;
+ case 20:
+ return CEA_SAD_SAMPLE_SIZE_20;
+ case 24:
+ return CEA_SAD_SAMPLE_SIZE_24;
+ default:
+ return 0;
+ }
+}
+
+static void parse_sad_field(struct eld_sad *sad, const char *key, char *value)
+{
+ char *tok;
+
+ /* Some fields are prefixed with the raw hex value, strip it */
+ if (value[0] == '[') {
+ value = strchr(value, ' ');
+ igt_assert(value != NULL);
+ value++; /* skip the space */
+ }
+
+ /* Single-value fields */
+ if (strcmp(key, "coding_type") == 0)
+ sad->coding_type = parse_sad_coding_type(value);
+ else if (strcmp(key, "channels") == 0)
+ sad->channels = atoi(value);
+
+ /* Multiple-value fields */
+ tok = strtok(value, " ");
+ while (tok) {
+ if (strcmp(key, "rates") == 0)
+ sad->rates |= parse_sad_rate(tok);
+ else if (strcmp(key, "bits") == 0)
+ sad->bits |= parse_sad_bit(tok);
+
+ tok = strtok(NULL, " ");
+ }
+}
+
+/** eld_parse_entry: parse an ELD entry
+ *
+ * Here is an example of an ELD entry:
+ *
+ * $ cat /proc/asound/card0/eld#0.2
+ * monitor_present 1
+ * eld_valid 1
+ * monitor_name U2879G6
+ * connection_type DisplayPort
+ * eld_version [0x2] CEA-861D or below
+ * edid_version [0x3] CEA-861-B, C or D
+ * manufacture_id 0xe305
+ * product_id 0x2879
+ * port_id 0x800
+ * support_hdcp 0
+ * support_ai 0
+ * audio_sync_delay 0
+ * speakers [0x1] FL/FR
+ * sad_count 1
+ * sad0_coding_type [0x1] LPCM
+ * sad0_channels 2
+ * sad0_rates [0xe0] 32000 44100 48000
+ * sad0_bits [0xe0000] 16 20 24
+ *
+ * Each entry contains one or more SAD blocks. Their contents are exposed in
+ * the sadN_* fields.
+ */
+static bool eld_parse_entry(const char *path, struct eld_entry *eld)
+{
+ FILE *f;
+ char buf[1024];
+ char *key, *value, *sad_key;
+ size_t len;
+ bool monitor_present = false;
+ int sad_index;
+
+ memset(eld, 0, sizeof(*eld));
+
+ f = fopen(path, "r");
+ if (!f) {
+ igt_debug("Failed to open ELD file: %s\n", path);
+ return false;
+ }
+
+ while ((fgets(buf, sizeof(buf), f)) != NULL) {
+ len = strlen(buf);
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ key = strtok(buf, ELD_DELIM);
+ value = strtok(NULL, "");
+ /* Skip whitespace at the beginning */
+ value += strspn(value, ELD_DELIM);
+
+ if (strcmp(key, "monitor_present") == 0)
+ monitor_present = strcmp(value, "1") == 0;
+ else if (strcmp(key, "eld_valid") == 0)
+ eld->valid = strcmp(value, "1") == 0;
+ else if (strcmp(key, "monitor_name") == 0)
+ snprintf(eld->monitor_name, sizeof(eld->monitor_name),
+ "%s", value);
+ else if (strcmp(key, "sad_count") == 0)
+ eld->sads_len = atoi(value);
+ else if (sscanf(key, "sad%d_%ms", &sad_index, &sad_key) == 2) {
+ igt_assert(sad_index < ELD_SADS_CAP);
+ igt_assert(sad_index < eld->sads_len);
+ parse_sad_field(&eld->sads[sad_index], sad_key, value);
+ free(sad_key);
+ }
+ }
+
+ if (ferror(f) != 0) {
+ igt_debug("Failed to read ELD file %s: %d\n", path, ferror(f));
+ fclose(f);
+ return false;
+ }
+
+ fclose(f);
+
+ if (!monitor_present)
+ igt_debug("Monitor not present in ELD: %s\n", path);
+ return monitor_present;
+}
+
+/** eld_get_igt: retrieve the ALSA ELD entry matching the IGT EDID */
+bool eld_get_igt(struct eld_entry *eld)
+{
+ DIR *dir;
+ struct dirent *dirent;
+ int i;
+ char card[64];
+ char path[PATH_MAX];
+
+ for (i = 0; i < 8; i++) {
+ snprintf(card, sizeof(card), "/proc/asound/card%d", i);
+ dir = opendir(card);
+ if (!dir)
+ continue;
+
+ while ((dirent = readdir(dir))) {
+ if (strncmp(dirent->d_name, ELD_PREFIX,
+ strlen(ELD_PREFIX)) != 0)
+ continue;
+
+ snprintf(path, sizeof(path), "%s/%s", card,
+ dirent->d_name);
+ if (!eld_parse_entry(path, eld)) {
+ continue;
+ }
+
+ if (!eld->valid) {
+ igt_debug("Skipping invalid ELD: %s\n", path);
+ continue;
+ }
+
+ if (strcmp(eld->monitor_name, "IGT") != 0) {
+ igt_debug("Skipping non-IGT ELD: %s "
+ "(monitor name: %s)\n",
+ path, eld->monitor_name);
+ continue;
+ }
+
+ closedir(dir);
+ return true;
+ }
+ closedir(dir);
+ }
+
+ return false;
+}
+
+/** eld_has_igt: check whether ALSA has detected the audio-capable IGT EDID by
+ * parsing ELD entries */
+bool eld_has_igt(void)
+{
+ struct eld_entry eld;
+ return eld_get_igt(&eld);
+}
diff --git a/lib/igt_eld.h b/lib/igt_eld.h
new file mode 100644
index 00000000..7c4489f0
--- /dev/null
+++ b/lib/igt_eld.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors: Simon Ser <simon.ser@intel.com>
+ */
+
+#ifndef IGT_ELD_H
+#define IGT_ELD_H
+
+#include "config.h"
+
+#include <stdbool.h>
+
+#include "igt_edid.h"
+
+#define ELD_SADS_CAP 4
+
+/** eld_sad: Short Audio Descriptor */
+struct eld_sad {
+ enum cea_sad_format coding_type;
+ int channels;
+ unsigned int rates; /* enum cea_sad_sampling_rate */
+ unsigned int bits; /* enum cea_sad_pcm_sample_size */
+};
+
+struct eld_entry {
+ bool valid;
+ char monitor_name[16];
+ size_t sads_len;
+ struct eld_sad sads[ELD_SADS_CAP];
+};
+
+bool eld_get_igt(struct eld_entry *eld);
+bool eld_has_igt(void);
+
+#endif
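Together with the audio EDIDs added to igt_kms later in this patch, these helpers let a test verify end to end that the EDID it forced actually reached ALSA. A minimal usage sketch, not taken from the patch: the wrapping function and the specific rate/size assertions are illustrative, only eld_get_igt() and the eld_entry/eld_sad fields come from this header.

#include "igt.h"
#include "igt_eld.h"

/* Hypothetical check: confirm ALSA picked up the PCM parameters that the IGT
 * audio EDID advertises. */
static void check_igt_eld(void)
{
	struct eld_entry eld;
	size_t i;

	igt_require(eld_get_igt(&eld));

	for (i = 0; i < eld.sads_len; i++) {
		const struct eld_sad *sad = &eld.sads[i];

		if (sad->coding_type != CEA_SAD_FORMAT_PCM)
			continue;

		igt_assert(sad->channels >= 2);
		igt_assert(sad->rates & CEA_SAD_SAMPLING_RATE_48KHZ);
		igt_assert(sad->bits & CEA_SAD_SAMPLE_SIZE_16);
	}
}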
diff --git a/lib/igt_fb.c b/lib/igt_fb.c
index d4929019..9d4f905e 100644
--- a/lib/igt_fb.c
+++ b/lib/igt_fb.c
@@ -98,6 +98,12 @@ static const struct format_desc_struct {
.num_planes = 1, .plane_bpp = { 16, },
.hsub = 1, .vsub = 1,
},
+ { .name = "C8", .depth = -1, .drm_id = DRM_FORMAT_C8,
+ .cairo_id = CAIRO_FORMAT_INVALID,
+ .pixman_id = PIXMAN_r3g3b2,
+ .num_planes = 1, .plane_bpp = { 8, },
+ .hsub = 1, .vsub = 1,
+ },
{ .name = "XRGB1555", .depth = -1, .drm_id = DRM_FORMAT_XRGB1555,
.cairo_id = CAIRO_FORMAT_INVALID,
.pixman_id = PIXMAN_x1r5g5b5,
@@ -1716,17 +1722,25 @@ static void free_linear_mapping(struct fb_blit_upload *blit)
struct igt_fb *fb = blit->fb;
struct fb_blit_linear *linear = &blit->linear;
- gem_munmap(linear->map, linear->fb.size);
- gem_set_domain(fd, linear->fb.gem_handle,
- I915_GEM_DOMAIN_GTT, 0);
+ if (igt_vc4_is_tiled(fb->modifier)) {
+ void *map = igt_vc4_mmap_bo(fd, fb->gem_handle, fb->size, PROT_WRITE);
- if (blit->batch)
- rendercopy(blit, fb, &linear->fb);
- else
- blitcopy(fb, &linear->fb);
+ vc4_fb_convert_plane_to_tiled(fb, map, &linear->fb, &linear->map);
+
+ munmap(map, fb->size);
+ } else {
+ gem_munmap(linear->map, linear->fb.size);
+ gem_set_domain(fd, linear->fb.gem_handle,
+ I915_GEM_DOMAIN_GTT, 0);
+
+ if (blit->batch)
+ rendercopy(blit, fb, &linear->fb);
+ else
+ blitcopy(fb, &linear->fb);
- gem_sync(fd, linear->fb.gem_handle);
- gem_close(fd, linear->fb.gem_handle);
+ gem_sync(fd, linear->fb.gem_handle);
+ gem_close(fd, linear->fb.gem_handle);
+ }
if (blit->batch) {
intel_batchbuffer_free(blit->batch);
@@ -1751,7 +1765,7 @@ static void setup_linear_mapping(struct fb_blit_upload *blit)
struct igt_fb *fb = blit->fb;
struct fb_blit_linear *linear = &blit->linear;
- if (use_rendercopy(fb)) {
+ if (!igt_vc4_is_tiled(fb->modifier) && use_rendercopy(fb)) {
blit->bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
blit->batch = intel_batchbuffer_alloc(blit->bufmgr,
intel_get_drm_devid(fd));
@@ -1771,23 +1785,35 @@ static void setup_linear_mapping(struct fb_blit_upload *blit)
igt_assert(linear->fb.gem_handle > 0);
- /* Copy fb content to linear BO */
- gem_set_domain(fd, linear->fb.gem_handle,
- I915_GEM_DOMAIN_GTT, 0);
+ if (igt_vc4_is_tiled(fb->modifier)) {
+ void *map = igt_vc4_mmap_bo(fd, fb->gem_handle, fb->size, PROT_READ);
- if (blit->batch)
- rendercopy(blit, &linear->fb, fb);
- else
- blitcopy(&linear->fb, fb);
+ linear->map = igt_vc4_mmap_bo(fd, linear->fb.gem_handle,
+ linear->fb.size,
+ PROT_READ | PROT_WRITE);
+
+ vc4_fb_convert_plane_from_tiled(&linear->fb, &linear->map, fb, map);
+
+ munmap(map, fb->size);
+ } else {
+ /* Copy fb content to linear BO */
+ gem_set_domain(fd, linear->fb.gem_handle,
+ I915_GEM_DOMAIN_GTT, 0);
+
+ if (blit->batch)
+ rendercopy(blit, &linear->fb, fb);
+ else
+ blitcopy(&linear->fb, fb);
- gem_sync(fd, linear->fb.gem_handle);
+ gem_sync(fd, linear->fb.gem_handle);
- gem_set_domain(fd, linear->fb.gem_handle,
- I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ gem_set_domain(fd, linear->fb.gem_handle,
+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
- /* Setup cairo context */
- linear->map = gem_mmap__cpu(fd, linear->fb.gem_handle,
- 0, linear->fb.size, PROT_READ | PROT_WRITE);
+ /* Setup cairo context */
+ linear->map = gem_mmap__cpu(fd, linear->fb.gem_handle,
+ 0, linear->fb.size, PROT_READ | PROT_WRITE);
+ }
}
static void create_cairo_surface__gpu(int fd, struct igt_fb *fb)
@@ -2902,7 +2928,7 @@ static void create_cairo_surface__convert(int fd, struct igt_fb *fb)
&blit->shadow_fb);
igt_assert(blit->shadow_ptr);
- if (use_rendercopy(fb) || use_blitter(fb)) {
+ if (use_rendercopy(fb) || use_blitter(fb) || igt_vc4_is_tiled(fb->modifier)) {
setup_linear_mapping(&blit->base);
} else {
blit->base.linear.fb = *fb;
@@ -2983,7 +3009,7 @@ cairo_surface_t *igt_get_cairo_surface(int fd, struct igt_fb *fb)
((f->cairo_id == CAIRO_FORMAT_INVALID) &&
(f->pixman_id != PIXMAN_invalid)))
create_cairo_surface__convert(fd, fb);
- else if (use_blitter(fb) || use_rendercopy(fb))
+ else if (use_blitter(fb) || use_rendercopy(fb) || igt_vc4_is_tiled(fb->modifier))
create_cairo_surface__gpu(fd, fb);
else
create_cairo_surface__gtt(fd, fb);
@@ -3102,58 +3128,23 @@ unsigned int igt_fb_convert_with_stride(struct igt_fb *dst, struct igt_fb *src,
uint64_t dst_modifier,
unsigned int dst_stride)
{
- struct fb_convert cvt = { };
- struct igt_fb linear;
- void *dst_ptr, *src_ptr;
- uint64_t base_modifier;
+ /* Use the cairo api to convert */
+ cairo_surface_t *surf = igt_get_cairo_surface(src->fd, src);
+ cairo_t *cr;
int fb_id;
- if (is_vc4_device(src->fd))
- base_modifier = fourcc_mod_broadcom_mod(dst_modifier);
- else
- base_modifier = dst_modifier;
-
- fb_id = igt_create_fb_with_bo_size(src->fd, src->width, src->height,
- dst_fourcc,
- LOCAL_DRM_FORMAT_MOD_NONE, &linear,
- 0, dst_stride);
+ fb_id = igt_create_fb_with_bo_size(src->fd, src->width,
+ src->height, dst_fourcc,
+ dst_modifier, dst, 0,
+ dst_stride);
igt_assert(fb_id > 0);
- src_ptr = igt_fb_map_buffer(src->fd, src);
- igt_assert(src_ptr);
-
- dst_ptr = igt_fb_map_buffer(linear.fd, &linear);
- igt_assert(dst_ptr);
-
- cvt.dst.ptr = dst_ptr;
- cvt.dst.fb = &linear;
- cvt.src.ptr = src_ptr;
- cvt.src.fb = src;
- fb_convert(&cvt);
-
- igt_fb_unmap_buffer(dst, dst_ptr);
- igt_fb_unmap_buffer(src, src_ptr);
-
- switch (base_modifier) {
- case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
- fb_id = igt_vc4_fb_t_tiled_convert(dst, &linear);
- break;
- case DRM_FORMAT_MOD_BROADCOM_SAND32:
- case DRM_FORMAT_MOD_BROADCOM_SAND64:
- case DRM_FORMAT_MOD_BROADCOM_SAND128:
- case DRM_FORMAT_MOD_BROADCOM_SAND256:
- fb_id = vc4_fb_sand_tiled_convert(dst, &linear, dst_modifier);
- break;
- default:
- igt_assert(dst_modifier == LOCAL_DRM_FORMAT_MOD_NONE);
- }
+ cr = igt_get_cairo_ctx(dst->fd, dst);
+ cairo_set_source_surface(cr, surf, 0, 0);
+ cairo_paint(cr);
+ igt_put_cairo_ctx(dst->fd, dst, cr);
- igt_assert(fb_id > 0);
-
- if (dst_modifier == LOCAL_DRM_FORMAT_MOD_NONE)
- *dst = linear;
- else
- igt_remove_fb(linear.fd, &linear);
+ cairo_surface_destroy(surf);
return fb_id;
}
@@ -3248,6 +3239,15 @@ bool igt_fb_supported_format(uint32_t drm_format)
{
const struct format_desc_struct *f;
+ /*
+ * C8 needs a LUT which (at least for the time being)
+ * is the responsibility of each test. Not all tests
+ * have the required code so let's keep C8 hidden from
+ * most eyes.
+ */
+ if (drm_format == DRM_FORMAT_C8)
+ return false;
+
for_each_format(f)
if (f->drm_id == drm_format)
return (f->cairo_id != CAIRO_FORMAT_INVALID) ||
diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index a2eaadf5..78e3cd08 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -41,6 +41,7 @@
#include "intel_reg.h"
#include "intel_chipset.h"
#include "igt_dummyload.h"
+#include "i915/gem_engine_topology.h"
/**
* SECTION:igt_gt
@@ -556,7 +557,7 @@ const struct intel_execution_engine intel_execution_engines[] = {
{ NULL, 0, 0 }
};
-bool gem_can_store_dword(int fd, unsigned int engine)
+bool gem_class_can_store_dword(int fd, int class)
{
uint16_t devid = intel_get_drm_devid(fd);
const struct intel_device_info *info = intel_get_device_info(devid);
@@ -568,8 +569,8 @@ bool gem_can_store_dword(int fd, unsigned int engine)
if (gen == 3 && (info->is_grantsdale || info->is_alviso))
return false; /* only supports physical addresses */
- if (gen == 6 && ((engine & 0x3f) == I915_EXEC_BSD))
- return false; /* kills the machine! */
+ if (gen == 6 && class == I915_ENGINE_CLASS_VIDEO)
+ return false;
if (info->is_broadwater)
return false; /* Not sure yet... */
@@ -577,55 +578,37 @@ bool gem_can_store_dword(int fd, unsigned int engine)
return true;
}
+bool gem_can_store_dword(int fd, unsigned int engine)
+{
+ return gem_class_can_store_dword(fd,
+ gem_execbuf_flags_to_engine_class(engine));
+}
+
const struct intel_execution_engine2 intel_execution_engines2[] = {
- { "rcs0", I915_ENGINE_CLASS_RENDER, 0 },
- { "bcs0", I915_ENGINE_CLASS_COPY, 0 },
- { "vcs0", I915_ENGINE_CLASS_VIDEO, 0 },
- { "vcs1", I915_ENGINE_CLASS_VIDEO, 1 },
- { "vecs0", I915_ENGINE_CLASS_VIDEO_ENHANCE, 0 },
+ { "rcs0", I915_ENGINE_CLASS_RENDER, 0, I915_EXEC_RENDER },
+ { "bcs0", I915_ENGINE_CLASS_COPY, 0, I915_EXEC_BLT },
+ { "vcs0", I915_ENGINE_CLASS_VIDEO, 0, I915_EXEC_BSD | I915_EXEC_BSD_RING1 },
+ { "vcs1", I915_ENGINE_CLASS_VIDEO, 1, I915_EXEC_BSD | I915_EXEC_BSD_RING2 },
+ { "vcs2", I915_ENGINE_CLASS_VIDEO, 2, -1 },
+ { "vecs0", I915_ENGINE_CLASS_VIDEO_ENHANCE, 0, I915_EXEC_VEBOX },
{ }
};
-unsigned int
-gem_class_instance_to_eb_flags(int gem_fd,
- enum drm_i915_gem_engine_class class,
- unsigned int instance)
+int gem_execbuf_flags_to_engine_class(unsigned int flags)
{
- if (class != I915_ENGINE_CLASS_VIDEO)
- igt_assert(instance == 0);
- else
- igt_assert(instance >= 0 && instance <= 1);
-
- switch (class) {
- case I915_ENGINE_CLASS_RENDER:
- return I915_EXEC_RENDER;
- case I915_ENGINE_CLASS_COPY:
- return I915_EXEC_BLT;
- case I915_ENGINE_CLASS_VIDEO:
- if (instance == 0) {
- if (gem_has_bsd2(gem_fd))
- return I915_EXEC_BSD | I915_EXEC_BSD_RING1;
- else
- return I915_EXEC_BSD;
-
- } else {
- return I915_EXEC_BSD | I915_EXEC_BSD_RING2;
- }
- case I915_ENGINE_CLASS_VIDEO_ENHANCE:
- return I915_EXEC_VEBOX;
- case I915_ENGINE_CLASS_INVALID:
+ switch (flags & 0x3f) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ return I915_ENGINE_CLASS_RENDER;
+ case I915_EXEC_BLT:
+ return I915_ENGINE_CLASS_COPY;
+ case I915_EXEC_BSD:
+ return I915_ENGINE_CLASS_VIDEO;
+ case I915_EXEC_VEBOX:
+ return I915_ENGINE_CLASS_VIDEO_ENHANCE;
default:
igt_assert(0);
- };
-}
-
-bool gem_has_engine(int gem_fd,
- enum drm_i915_gem_engine_class class,
- unsigned int instance)
-{
- return gem_has_ring(gem_fd,
- gem_class_instance_to_eb_flags(gem_fd, class,
- instance));
+ }
}
bool gem_ring_is_physical_engine(int fd, unsigned ring)
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index 475c0b3c..73b5002a 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -90,35 +90,16 @@ bool gem_ring_is_physical_engine(int fd, unsigned int ring);
bool gem_ring_has_physical_engine(int fd, unsigned int ring);
bool gem_can_store_dword(int fd, unsigned int engine);
+bool gem_class_can_store_dword(int fd, int class);
extern const struct intel_execution_engine2 {
const char *name;
int class;
int instance;
+ uint64_t flags;
+ bool is_virtual;
} intel_execution_engines2[];
-unsigned int
-gem_class_instance_to_eb_flags(int gem_fd,
- enum drm_i915_gem_engine_class class,
- unsigned int instance);
-
-bool gem_has_engine(int gem_fd,
- enum drm_i915_gem_engine_class class,
- unsigned int instance);
-
-static inline
-void gem_require_engine(int gem_fd,
- enum drm_i915_gem_engine_class class,
- unsigned int instance)
-{
- igt_require(gem_has_engine(gem_fd, class, instance));
-}
-
-#define __for_each_engine_class_instance(e__) \
- for ((e__) = intel_execution_engines2; (e__)->name; (e__)++)
-
-#define for_each_engine_class_instance(fd__, e__) \
- for ((e__) = intel_execution_engines2; (e__)->name; (e__)++) \
- for_if (gem_has_engine((fd__), (e__)->class, (e__)->instance))
+int gem_execbuf_flags_to_engine_class(unsigned int flags);
#endif /* IGT_GT_H */
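The extended intel_execution_engines2 table and the per-class helper replace the removed gem_class_instance_to_eb_flags()/gem_has_engine() pair. A small, illustrative sketch of walking the new table; the reporting function itself is made up, only the table fields and gem_class_can_store_dword() come from this patch.

#include "igt.h"

/* Walk the static engine table and report which engine classes can use
 * MI_STORE_DWORD, using the new per-class helper. */
static void report_store_dword(int fd)
{
	const struct intel_execution_engine2 *e;

	for (e = intel_execution_engines2; e->name; e++) {
		/* vcs2 carries no legacy execbuf mapping, hence flags == -1 */
		if (e->flags == (uint64_t)-1)
			continue;

		igt_debug("%s: class %d instance %d, can store dword: %s\n",
			  e->name, e->class, e->instance,
			  gem_class_can_store_dword(fd, e->class) ?
			  "yes" : "no");
	}
}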
diff --git a/lib/igt_kms.c b/lib/igt_kms.c
index df9aafd2..da188a39 100644
--- a/lib/igt_kms.c
+++ b/lib/igt_kms.c
@@ -26,6 +26,8 @@
*/
#include "config.h"
+
+#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
@@ -180,6 +182,107 @@ const unsigned char *igt_kms_get_alt_edid(void)
return (unsigned char *) &edid;
}
+static void
+generate_audio_edid(unsigned char raw_edid[static AUDIO_EDID_LENGTH],
+ bool with_vsd, struct cea_sad *sad,
+ struct cea_speaker_alloc *speaker_alloc)
+{
+ struct edid *edid;
+ struct edid_ext *edid_ext;
+ struct edid_cea *edid_cea;
+ char *cea_data;
+ struct edid_cea_data_block *block;
+ const struct cea_vsd *vsd;
+ size_t cea_data_size, vsd_size;
+
+ /* Create a new EDID from the base IGT EDID, and add an
+ * extension that advertises audio support. */
+ edid = (struct edid *) raw_edid;
+ memcpy(edid, igt_kms_get_base_edid(), sizeof(struct edid));
+ edid->extensions_len = 1;
+ edid_ext = &edid->extensions[0];
+ edid_cea = &edid_ext->data.cea;
+ cea_data = edid_cea->data;
+ cea_data_size = 0;
+
+ /* Short Audio Descriptor block */
+ block = (struct edid_cea_data_block *) &cea_data[cea_data_size];
+ cea_data_size += edid_cea_data_block_set_sad(block, sad, 1);
+
+ /* A Vendor Specific Data block is needed for HDMI audio */
+ if (with_vsd) {
+ block = (struct edid_cea_data_block *) &cea_data[cea_data_size];
+ vsd = cea_vsd_get_hdmi_default(&vsd_size);
+ cea_data_size += edid_cea_data_block_set_vsd(block, vsd,
+ vsd_size);
+ }
+
+ /* Speaker Allocation Data block */
+ block = (struct edid_cea_data_block *) &cea_data[cea_data_size];
+ cea_data_size += edid_cea_data_block_set_speaker_alloc(block,
+ speaker_alloc);
+
+ assert(cea_data_size <= sizeof(edid_cea->data));
+
+ edid_ext_set_cea(edid_ext, cea_data_size,
+ EDID_CEA_BASIC_AUDIO);
+
+ edid_update_checksum(edid);
+ edid_ext_update_cea_checksum(edid_ext);
+}
+
+const unsigned char *igt_kms_get_hdmi_audio_edid(void)
+{
+ int channels;
+ uint8_t sampling_rates, sample_sizes;
+ static unsigned char raw_edid[AUDIO_EDID_LENGTH] = {0};
+ struct cea_sad sad = {0};
+ struct cea_speaker_alloc speaker_alloc = {0};
+
+ /* Initialize the Short Audio Descriptor for PCM */
+ channels = 2;
+ sampling_rates = CEA_SAD_SAMPLING_RATE_32KHZ |
+ CEA_SAD_SAMPLING_RATE_44KHZ |
+ CEA_SAD_SAMPLING_RATE_48KHZ;
+ sample_sizes = CEA_SAD_SAMPLE_SIZE_16 |
+ CEA_SAD_SAMPLE_SIZE_20 |
+ CEA_SAD_SAMPLE_SIZE_24;
+ cea_sad_init_pcm(&sad, channels, sampling_rates, sample_sizes);
+
+ /* Initialize the Speaker Allocation Data */
+ speaker_alloc.speakers = CEA_SPEAKER_FRONT_LEFT_RIGHT_CENTER;
+
+ generate_audio_edid(raw_edid, true, &sad, &speaker_alloc);
+
+ return raw_edid;
+}
+
+const unsigned char *igt_kms_get_dp_audio_edid(void)
+{
+ int channels;
+ uint8_t sampling_rates, sample_sizes;
+ static unsigned char raw_edid[AUDIO_EDID_LENGTH] = {0};
+ struct cea_sad sad = {0};
+ struct cea_speaker_alloc speaker_alloc = {0};
+
+ /* Initialize the Short Audio Descriptor for PCM */
+ channels = 2;
+ sampling_rates = CEA_SAD_SAMPLING_RATE_32KHZ |
+ CEA_SAD_SAMPLING_RATE_44KHZ |
+ CEA_SAD_SAMPLING_RATE_48KHZ;
+ sample_sizes = CEA_SAD_SAMPLE_SIZE_16 |
+ CEA_SAD_SAMPLE_SIZE_20 |
+ CEA_SAD_SAMPLE_SIZE_24;
+ cea_sad_init_pcm(&sad, channels, sampling_rates, sample_sizes);
+
+ /* Initialize the Speaker Allocation Data */
+ speaker_alloc.speakers = CEA_SPEAKER_FRONT_LEFT_RIGHT_CENTER;
+
+ generate_audio_edid(raw_edid, false, &sad, &speaker_alloc);
+
+ return raw_edid;
+}
+
const char * const igt_plane_prop_names[IGT_NUM_PLANE_PROPS] = {
[IGT_PLANE_SRC_X] = "SRC_X",
[IGT_PLANE_SRC_Y] = "SRC_Y",
@@ -1357,64 +1460,6 @@ void kmstest_edid_add_4k(const unsigned char *edid, size_t length,
}
/**
- * kmstest_edid_add_audio:
- * @edid: an existing valid edid block
- * @length: length of @edid
- * @new_edid_ptr: pointer to where the new edid will be placed
- * @new_length: pointer to the size of the new edid
- *
- * Makes a copy of an existing edid block and adds an extension indicating
- * basic audio support and speaker data block.
- *
- */
-void kmstest_edid_add_audio(const unsigned char *edid, size_t length,
- unsigned char *new_edid_ptr[], size_t *new_length)
-{
- char vsdb_block_len = 10, audio_block_len = 4, spkr_block_len = 4;
- struct edid_block new_edid = init_cea_block(edid, length, new_edid_ptr,
- new_length,
- vsdb_block_len +
- audio_block_len +
- spkr_block_len,
- DTD_SUPPORTS_AUDIO);
- int pos = new_edid.pos;
-
- /* audio block, short audio block descriptors */
- new_edid.data[pos++] = (1 << 5) | (audio_block_len - 1);
- new_edid.data[pos++] = 0x09; /* Audio Format, PCM */
- new_edid.data[pos++] = 0x07; /* Frequency, 32, 44.1, 48kHz */
- new_edid.data[pos++] = 0x07; /* Bit Rate 16, 20, 24 bit */
-
-
- /* vsdb block ( id | length ) -- need vsdb as well
- * otherwise the kernel will fallback to lower clock modes */
- new_edid.data[pos++] = 3 << 5 | (vsdb_block_len - 1);
- /* registration id */
- new_edid.data[pos++] = 0x3;
- new_edid.data[pos++] = 0xc;
- new_edid.data[pos++] = 0x0;
- /* source physical address */
- new_edid.data[pos++] = 0x10;
- new_edid.data[pos++] = 0x00;
- /* Supports_AI ... etc */
- new_edid.data[pos++] = 0x00;
- /* Max TMDS Clock */
- new_edid.data[pos++] = 0x00;
- /* Latency present, HDMI Video Present */
- new_edid.data[pos++] = 0x20;
- /* HDMI Video */
- new_edid.data[pos++] = 0x00; /* 3D present */
-
- /* speaker data block */
- new_edid.data[pos++] = (4 << 5) | (spkr_block_len - 1);
- new_edid.data[pos++] = (1 << 5);
- new_edid.data[pos++] = 0x00;
- new_edid.data[pos++] = 0x00;
-
- update_edid_csum(new_edid.data, length);
-}
-
-/**
* kmstest_unset_all_crtcs:
* @drm_fd: the DRM fd
* @resources: libdrm resources pointer
@@ -1656,6 +1701,27 @@ void igt_assert_plane_visible(int fd, enum pipe pipe, int plane_index, bool visi
igt_assert_eq(visible, visibility);
}
+/**
+ * kms_has_vblank:
+ * @fd: DRM fd
+ *
+ * Issue a dummy drmWaitVBlank() request and check errno to determine whether
+ * the driver supports VBlank.
+ *
+ * Returns: true if the target driver supports VBlank, false otherwise.
+ */
+bool kms_has_vblank(int fd)
+{
+ drmVBlank dummy_vbl;
+
+ memset(&dummy_vbl, 0, sizeof(drmVBlank));
+ dummy_vbl.request.type = DRM_VBLANK_RELATIVE;
+
+ errno = 0;
+ drmWaitVBlank(fd, &dummy_vbl);
+ return (errno != EOPNOTSUPP);
+}
+
/*
* A small modeset API
*/
@@ -2674,7 +2740,9 @@ static int igt_cursor_commit_legacy(igt_plane_t *cursor,
uint32_t crtc_id = pipe->crtc_id;
int ret;
- if (igt_plane_is_prop_changed(cursor, IGT_PLANE_FB_ID)) {
+ if (igt_plane_is_prop_changed(cursor, IGT_PLANE_FB_ID) ||
+ igt_plane_is_prop_changed(cursor, IGT_PLANE_CRTC_W) ||
+ igt_plane_is_prop_changed(cursor, IGT_PLANE_CRTC_H)) {
if (cursor->gem_handle)
LOG(display,
"SetCursor pipe %s, fb %u %dx%d\n",
@@ -3274,7 +3342,6 @@ static int igt_atomic_commit(igt_display_t *display, uint32_t flags, void *user_
if (display->is_atomic != 1)
return -1;
req = drmModeAtomicAlloc();
- drmModeAtomicSetCursor(req, 0);
for_each_pipe(display, pipe) {
igt_pipe_t *pipe_obj = &display->pipes[pipe];
diff --git a/lib/igt_kms.h b/lib/igt_kms.h
index 38bdc08f..a448a003 100644
--- a/lib/igt_kms.h
+++ b/lib/igt_kms.h
@@ -196,7 +196,6 @@ bool kmstest_force_connector(int fd, drmModeConnector *connector,
enum kmstest_force_connector_state state);
void kmstest_edid_add_3d(const unsigned char *edid, size_t length, unsigned char *new_edid_ptr[], size_t *new_length);
void kmstest_edid_add_4k(const unsigned char *edid, size_t length, unsigned char *new_edid_ptr[], size_t *new_length);
-void kmstest_edid_add_audio(const unsigned char *edid, size_t length, unsigned char *new_edid_ptr[], size_t *new_length);
void kmstest_force_edid(int drm_fd, drmModeConnector *connector,
const unsigned char *edid, size_t length);
@@ -230,6 +229,8 @@ void kmstest_wait_for_pageflip(int fd);
unsigned int kmstest_get_vblank(int fd, int pipe, unsigned int flags);
void igt_assert_plane_visible(int fd, enum pipe pipe, int plane_index, bool visibility);
+bool kms_has_vblank(int fd);
+
/*
* A small modeset API
*/
@@ -754,9 +755,15 @@ void igt_reset_connectors(void);
uint32_t kmstest_get_vbl_flag(uint32_t pipe_id);
+struct cea_sad;
+struct cea_speaker_alloc;
+
#define EDID_LENGTH 128
-const unsigned char* igt_kms_get_base_edid(void);
-const unsigned char* igt_kms_get_alt_edid(void);
+#define AUDIO_EDID_LENGTH (2 * EDID_LENGTH)
+const unsigned char *igt_kms_get_base_edid(void);
+const unsigned char *igt_kms_get_alt_edid(void);
+const unsigned char *igt_kms_get_hdmi_audio_edid(void);
+const unsigned char *igt_kms_get_dp_audio_edid(void);
struct udev_monitor *igt_watch_hotplug(void);
bool igt_hotplug_detected(struct udev_monitor *mon,
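igt_kms_get_hdmi_audio_edid() and igt_kms_get_dp_audio_edid() are meant to be paired with kmstest_force_edid() and the new ELD helpers. A hedged sketch of that pairing; the connector lookup and any reprobe or modeset required between forcing the EDID and reading the ELD are left out, and the helper function name is hypothetical.

#include "igt.h"
#include "igt_eld.h"

/* Force the audio-capable IGT EDID on a connector and check that ALSA exposes
 * a matching ELD entry. */
static void force_hdmi_audio_edid(int drm_fd, drmModeConnector *connector)
{
	kmstest_force_edid(drm_fd, connector,
			   igt_kms_get_hdmi_audio_edid(),
			   AUDIO_EDID_LENGTH);

	igt_assert(eld_has_igt());
}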
diff --git a/lib/igt_panfrost.c b/lib/igt_panfrost.c
new file mode 100644
index 00000000..8b0c2b77
--- /dev/null
+++ b/lib/igt_panfrost.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 2016 Broadcom
+ * Copyright © 2019 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+
+#include "drmtest.h"
+#include "igt_aux.h"
+#include "igt_core.h"
+#include "igt_panfrost.h"
+#include "ioctl_wrappers.h"
+#include "intel_reg.h"
+#include "intel_chipset.h"
+#include "panfrost_drm.h"
+#include "panfrost-job.h"
+
+/**
+ * SECTION:igt_panfrost
+ * @short_description: PANFROST support library
+ * @title: PANFROST
+ * @include: igt.h
+ *
+ * This library provides various auxiliary helper functions for writing PANFROST
+ * tests.
+ */
+
+struct panfrost_bo *
+igt_panfrost_gem_new(int fd, size_t size)
+{
+ struct panfrost_bo *bo = calloc(1, sizeof(*bo));
+
+ struct drm_panfrost_create_bo create_bo = {
+ .size = size,
+ };
+
+ do_ioctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create_bo);
+
+ bo->handle = create_bo.handle;
+ bo->offset = create_bo.offset;
+ bo->size = size;
+ return bo;
+}
+
+void
+igt_panfrost_free_bo(int fd, struct panfrost_bo *bo)
+{
+ if (bo->map)
+ munmap(bo->map, bo->size);
+ gem_close(fd, bo->handle);
+ free(bo);
+}
+
+uint32_t
+igt_panfrost_get_bo_offset(int fd, uint32_t handle)
+{
+ struct drm_panfrost_get_bo_offset get = {
+ .handle = handle,
+ };
+
+ do_ioctl(fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get);
+
+ return get.offset;
+}
+
+uint32_t
+igt_panfrost_get_param(int fd, int param)
+{
+ struct drm_panfrost_get_param get = {
+ .param = param,
+ };
+
+ do_ioctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get);
+
+ return get.value;
+}
+
+void *
+igt_panfrost_mmap_bo(int fd, uint32_t handle, uint32_t size, unsigned prot)
+{
+ struct drm_panfrost_mmap_bo mmap_bo = {
+ .handle = handle,
+ };
+ void *ptr;
+
+ mmap_bo.handle = handle;
+ do_ioctl(fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
+
+ ptr = mmap(0, size, prot, MAP_SHARED, fd, mmap_bo.offset);
+ if (ptr == MAP_FAILED)
+ return NULL;
+ else
+ return ptr;
+}
+
+void igt_panfrost_bo_mmap(int fd, struct panfrost_bo *bo)
+{
+ bo->map = igt_panfrost_mmap_bo(fd, bo->handle, bo->size,
+ PROT_READ | PROT_WRITE);
+ igt_assert(bo->map);
+}
+
+struct panfrost_submit *igt_panfrost_trivial_job(int fd, bool do_crash, int width, int height, uint32_t color)
+{
+ struct panfrost_submit *submit;
+ struct mali_job_descriptor_header header = {
+ .job_type = JOB_TYPE_FRAGMENT,
+ .job_index = 1,
+ .job_descriptor_size = 1,
+ };
+ struct mali_payload_fragment payload = {
+ .min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(0, 0),
+ .max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(ALIGN(width, 16), height),
+ };
+ struct bifrost_framebuffer mfbd_framebuffer = {
+ .unk0 = 0x0,
+ .unknown1 = 0x0,
+ .tiler_meta = 0xff00000000,
+ .width1 = MALI_POSITIVE(ALIGN(width, 16)),
+ .height1 = MALI_POSITIVE(height),
+ .width2 = MALI_POSITIVE(ALIGN(width, 16)),
+ .height2 = MALI_POSITIVE(height),
+ .unk1 = 0x1080,
+ .unk2 = 0x0,
+ .rt_count_1 = MALI_POSITIVE(1),
+ .rt_count_2 = 1,
+ .unk3 = 0x100,
+ .clear_stencil = 0x0,
+ .clear_depth = 0.000000,
+ .unknown2 = 0x1f,
+ };
+ struct mali_single_framebuffer sfbd_framebuffer = {
+ .unknown2 = 0x1f,
+ .width = MALI_POSITIVE(width),
+ .height = MALI_POSITIVE(height),
+ .stride = width * 4,
+ .resolution_check = ((width + height) / 3) << 4,
+ .tiler_flags = 0xfff,
+ .clear_color_1 = color,
+ .clear_color_2 = color,
+ .clear_color_3 = color,
+ .clear_color_4 = color,
+ .clear_flags = 0x101100 | MALI_CLEAR_SLOW,
+ .format = 0xb84e0281,
+ };
+ struct mali_rt_format fmt = {
+ .unk1 = 0x4000000,
+ .unk2 = 0x1,
+ .nr_channels = MALI_POSITIVE(4),
+ .flags = do_crash ? 0x444 | (1 << 8) : 0x444,
+ .swizzle = MALI_CHANNEL_BLUE | (MALI_CHANNEL_GREEN << 3) | (MALI_CHANNEL_RED << 6) | (MALI_CHANNEL_ONE << 9),
+ .unk4 = 0x8,
+ };
+ struct bifrost_render_target rts = {
+ .format = fmt,
+ .chunknown = {
+ .unk = 0x0,
+ .pointer = 0x0,
+ },
+ .framebuffer_stride = ALIGN(width, 16) * 4 / 16,
+ .clear_color_1 = color,
+ .clear_color_2 = color,
+ .clear_color_3 = color,
+ .clear_color_4 = color,
+ };
+ int gpu_prod_id = igt_panfrost_get_param(fd, DRM_PANFROST_PARAM_GPU_PROD_ID);
+ uint32_t *known_unknown;
+ uint32_t *bos;
+
+ submit = malloc(sizeof(*submit));
+
+ submit->fbo = igt_panfrost_gem_new(fd, ALIGN(width, 16) * height * 4);
+ rts.framebuffer = submit->fbo->offset;
+ sfbd_framebuffer.framebuffer = submit->fbo->offset;
+
+ submit->tiler_heap_bo = igt_panfrost_gem_new(fd, 32768 * 128);
+ mfbd_framebuffer.tiler_heap_start = submit->tiler_heap_bo->offset;
+ mfbd_framebuffer.tiler_heap_end = submit->tiler_heap_bo->offset + 32768 * 128;
+ sfbd_framebuffer.tiler_heap_free = mfbd_framebuffer.tiler_heap_start;
+ sfbd_framebuffer.tiler_heap_end = mfbd_framebuffer.tiler_heap_end;
+
+ submit->tiler_scratch_bo = igt_panfrost_gem_new(fd, 128 * 128 * 128);
+ mfbd_framebuffer.tiler_scratch_start = submit->tiler_scratch_bo->offset;
+ mfbd_framebuffer.tiler_scratch_middle = submit->tiler_scratch_bo->offset + 0xf0000;
+ sfbd_framebuffer.unknown_address_0 = mfbd_framebuffer.tiler_scratch_start;
+
+ submit->scratchpad_bo = igt_panfrost_gem_new(fd, 64 * 4096);
+ igt_panfrost_bo_mmap(fd, submit->scratchpad_bo);
+ mfbd_framebuffer.scratchpad = submit->scratchpad_bo->offset;
+ sfbd_framebuffer.unknown_address_1 = submit->scratchpad_bo->offset;
+ sfbd_framebuffer.unknown_address_2 = submit->scratchpad_bo->offset + 512;
+
+ known_unknown = ((void*)submit->scratchpad_bo->map) + 512;
+ *known_unknown = 0xa0000000;
+
+ if (gpu_prod_id >= 0x0750) {
+ submit->fb_bo = igt_panfrost_gem_new(fd, sizeof(mfbd_framebuffer) + sizeof(struct bifrost_render_target));
+ igt_panfrost_bo_mmap(fd, submit->fb_bo);
+ memcpy(submit->fb_bo->map, &mfbd_framebuffer, sizeof(mfbd_framebuffer));
+ memcpy(submit->fb_bo->map + sizeof(mfbd_framebuffer), &rts, sizeof(struct bifrost_render_target));
+ payload.framebuffer = submit->fb_bo->offset | MALI_MFBD;
+ } else {
+ // We don't know yet how to cause a hang on <=T720
+ // Should probably use an infinite loop to hang the GPU
+ igt_require(!do_crash);
+ submit->fb_bo = igt_panfrost_gem_new(fd, sizeof(sfbd_framebuffer));
+ igt_panfrost_bo_mmap(fd, submit->fb_bo);
+ memcpy(submit->fb_bo->map, &sfbd_framebuffer, sizeof(sfbd_framebuffer));
+ payload.framebuffer = submit->fb_bo->offset | MALI_SFBD;
+ }
+
+ submit->submit_bo = igt_panfrost_gem_new(fd, sizeof(header) + sizeof(payload) + 1024000);
+ igt_panfrost_bo_mmap(fd, submit->submit_bo);
+
+ memcpy(submit->submit_bo->map, &header, sizeof(header));
+ memcpy(submit->submit_bo->map + sizeof(header), &payload, sizeof(payload));
+
+ submit->args = malloc(sizeof(*submit->args));
+ memset(submit->args, 0, sizeof(*submit->args));
+ submit->args->jc = submit->submit_bo->offset;
+ submit->args->requirements = PANFROST_JD_REQ_FS;
+
+ bos = malloc(sizeof(*bos) * 6);
+ bos[0] = submit->fbo->handle;
+ bos[1] = submit->tiler_heap_bo->handle;
+ bos[2] = submit->tiler_scratch_bo->handle;
+ bos[3] = submit->scratchpad_bo->handle;
+ bos[4] = submit->fb_bo->handle;
+ bos[5] = submit->submit_bo->handle;
+
+ submit->args->bo_handles = to_user_pointer(bos);
+ submit->args->bo_handle_count = 6;
+
+ igt_assert_eq(drmSyncobjCreate(fd, DRM_SYNCOBJ_CREATE_SIGNALED, &submit->args->out_sync), 0);
+
+ return submit;
+}
+
+void igt_panfrost_free_job(int fd, struct panfrost_submit *submit)
+{
+ free(from_user_pointer(submit->args->bo_handles));
+ igt_panfrost_free_bo(fd, submit->submit_bo);
+ igt_panfrost_free_bo(fd, submit->fb_bo);
+ igt_panfrost_free_bo(fd, submit->scratchpad_bo);
+ igt_panfrost_free_bo(fd, submit->tiler_scratch_bo);
+ igt_panfrost_free_bo(fd, submit->tiler_heap_bo);
+ igt_panfrost_free_bo(fd, submit->fbo);
+ free(submit->args);
+ free(submit);
+}
diff --git a/lib/igt_panfrost.h b/lib/igt_panfrost.h
new file mode 100644
index 00000000..cc7998dc
--- /dev/null
+++ b/lib/igt_panfrost.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright © 2016 Broadcom
+ * Copyright © 2019 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef IGT_PANFROST_H
+#define IGT_PANFROST_H
+
+#include "panfrost_drm.h"
+
+struct panfrost_bo {
+ int handle;
+ uint64_t offset;
+ uint32_t size;
+ void *map;
+};
+
+struct panfrost_submit {
+ struct drm_panfrost_submit *args;
+ struct panfrost_bo *submit_bo;
+ struct panfrost_bo *fb_bo;
+ struct panfrost_bo *scratchpad_bo;
+ struct panfrost_bo *tiler_scratch_bo;
+ struct panfrost_bo *tiler_heap_bo;
+ struct panfrost_bo *fbo;
+};
+
+struct panfrost_bo *igt_panfrost_gem_new(int fd, size_t size);
+void igt_panfrost_free_bo(int fd, struct panfrost_bo *bo);
+
+struct panfrost_submit *igt_panfrost_trivial_job(int fd, bool do_crash, int width, int height, uint32_t color);
+void igt_panfrost_free_job(int fd, struct panfrost_submit *submit);
+
+/* IOCTL wrappers */
+uint32_t igt_panfrost_get_bo_offset(int fd, uint32_t handle);
+uint32_t igt_panfrost_get_param(int fd, int param);
+void *igt_panfrost_mmap_bo(int fd, uint32_t handle, uint32_t size, unsigned prot);
+
+void igt_panfrost_bo_mmap(int fd, struct panfrost_bo *bo);
+
+#endif /* IGT_PANFROST_H */
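The BO wrappers follow the usual IGT convention of asserting inside the helper so callers stay terse. A minimal sketch of allocating, mapping and releasing a buffer with them; the function, size and fill pattern are arbitrary, and the fd is assumed to be an open Panfrost DRM fd.

#include <string.h>

#include "igt.h"
#include "igt_panfrost.h"

/* Allocate a small BO, map it, scribble into it and release it. */
static void touch_bo(int fd)
{
	struct panfrost_bo *bo;

	bo = igt_panfrost_gem_new(fd, 4096);
	igt_panfrost_bo_mmap(fd, bo);

	memset(bo->map, 0xaa, bo->size);

	igt_panfrost_free_bo(fd, bo);
}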
diff --git a/lib/igt_syncobj.c b/lib/igt_syncobj.c
index d9114ca8..0fddb97a 100644
--- a/lib/igt_syncobj.c
+++ b/lib/igt_syncobj.c
@@ -223,7 +223,7 @@ syncobj_wait(int fd, uint32_t *handles, uint32_t count,
wait.pad = 0;
ret = __syncobj_wait(fd, &wait);
- if (ret == ETIME)
+ if (ret == -ETIME)
return false;
igt_assert_eq(ret, 0);
diff --git a/lib/igt_vc4.c b/lib/igt_vc4.c
index 9a0ba30b..4415fa32 100644
--- a/lib/igt_vc4.c
+++ b/lib/igt_vc4.c
@@ -56,6 +56,23 @@
* tests.
*/
+bool igt_vc4_is_tiled(uint64_t modifier)
+{
+ if (modifier >> 56ULL != DRM_FORMAT_MOD_VENDOR_BROADCOM)
+ return false;
+
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND32:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* igt_vc4_get_cleared_bo:
* @fd: device file descriptor
@@ -178,63 +195,12 @@ bool igt_vc4_purgeable_bo(int fd, int handle, bool purgeable)
return arg.retained;
}
-unsigned int igt_vc4_fb_t_tiled_convert(struct igt_fb *dst, struct igt_fb *src)
-{
- unsigned int fb_id;
- unsigned int i, j;
- void *src_buf;
- void *dst_buf;
- size_t bpp = src->plane_bpp[0];
- size_t dst_stride = ALIGN(src->strides[0], 128);
-
- fb_id = igt_create_fb_with_bo_size(src->fd, src->width, src->height,
- src->drm_format,
- DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
- dst, 0, dst_stride);
- igt_assert(fb_id > 0);
-
- igt_assert(bpp == 16 || bpp == 32);
-
- src_buf = igt_fb_map_buffer(src->fd, src);
- igt_assert(src_buf);
-
- dst_buf = igt_fb_map_buffer(dst->fd, dst);
- igt_assert(dst_buf);
-
- for (i = 0; i < src->height; i++) {
- for (j = 0; j < src->width; j++) {
- size_t src_offset = src->offsets[0];
- size_t dst_offset = dst->offsets[0];
-
- src_offset += src->strides[0] * i + j * bpp / 8;
- dst_offset += igt_vc4_t_tiled_offset(dst_stride,
- src->height,
- bpp, j, i);
-
- switch (bpp) {
- case 16:
- *(uint16_t *)(dst_buf + dst_offset) =
- *(uint16_t *)(src_buf + src_offset);
- break;
- case 32:
- *(uint32_t *)(dst_buf + dst_offset) =
- *(uint32_t *)(src_buf + src_offset);
- break;
- }
- }
- }
-
- igt_fb_unmap_buffer(src, src_buf);
- igt_fb_unmap_buffer(dst, dst_buf);
-
- return fb_id;
-}
/* Calculate the t-tile width so that size = width * height * bpp / 8. */
#define VC4_T_TILE_W(size, height, bpp) ((size) / (height) / ((bpp) / 8))
-size_t igt_vc4_t_tiled_offset(size_t stride, size_t height, size_t bpp,
- size_t x, size_t y)
+static size_t igt_vc4_t_tiled_offset(size_t stride, size_t height, size_t bpp,
+ size_t x, size_t y)
{
const size_t t1k_map_even[] = { 0, 3, 1, 2 };
const size_t t1k_map_odd[] = { 2, 1, 3, 0 };
@@ -308,18 +274,116 @@ size_t igt_vc4_t_tiled_offset(size_t stride, size_t height, size_t bpp,
return offset;
}
-static void vc4_fb_sand_tiled_convert_plane(struct igt_fb *dst, void *dst_buf,
+static void vc4_fb_convert_plane_to_t_tiled(struct igt_fb *dst, void *dst_buf,
struct igt_fb *src, void *src_buf,
- size_t column_width_bytes,
- size_t column_height,
unsigned int plane)
{
+ size_t bpp = src->plane_bpp[plane];
+ unsigned int i, j;
+
+ for (i = 0; i < src->height; i++) {
+ for (j = 0; j < src->width; j++) {
+ size_t src_offset = src->offsets[plane];
+ size_t dst_offset = dst->offsets[plane];
+
+ src_offset += src->strides[plane] * i + j * bpp / 8;
+ dst_offset += igt_vc4_t_tiled_offset(dst->strides[plane],
+ dst->height,
+ bpp, j, i);
+
+ switch (bpp) {
+ case 16:
+ *(uint16_t *)(dst_buf + dst_offset) =
+ *(uint16_t *)(src_buf + src_offset);
+ break;
+ case 32:
+ *(uint32_t *)(dst_buf + dst_offset) =
+ *(uint32_t *)(src_buf + src_offset);
+ break;
+ }
+ }
+ }
+}
+
+static void vc4_fb_convert_plane_from_t_tiled(struct igt_fb *dst, void *dst_buf,
+ struct igt_fb *src, void *src_buf,
+ unsigned int plane)
+{
+ size_t bpp = src->plane_bpp[plane];
+ unsigned int i, j;
+
+ for (i = 0; i < src->height; i++) {
+ for (j = 0; j < src->width; j++) {
+ size_t src_offset = src->offsets[plane];
+ size_t dst_offset = dst->offsets[plane];
+
+ src_offset += igt_vc4_t_tiled_offset(src->strides[plane],
+ src->height,
+ bpp, j, i);
+ src_offset += dst->strides[plane] * i + j * bpp / 8;
+
+ switch (bpp) {
+ case 16:
+ *(uint16_t *)(dst_buf + dst_offset) =
+ *(uint16_t *)(src_buf + src_offset);
+ break;
+ case 32:
+ *(uint32_t *)(dst_buf + dst_offset) =
+ *(uint32_t *)(src_buf + src_offset);
+ break;
+ }
+ }
+ }
+}
+
+static size_t vc4_sand_tiled_offset(size_t column_width, size_t column_size, size_t x,
+ size_t y, size_t bpp)
+{
+ size_t offset = 0;
+ size_t cols_x;
+ size_t pix_x;
+
+ /* Offset to the beginning of the relevant column. */
+ cols_x = x / column_width;
+ offset += cols_x * column_size;
+
+ /* Offset to the relevant pixel. */
+ pix_x = x % column_width;
+ offset += (column_width * y + pix_x) * bpp / 8;
+
+ return offset;
+}
+
+static void vc4_fb_convert_plane_to_sand_tiled(struct igt_fb *dst, void *dst_buf,
+ struct igt_fb *src, void *src_buf,
+ unsigned int plane)
+{
+ uint64_t modifier_base = fourcc_mod_broadcom_mod(dst->modifier);
+ uint32_t column_height = fourcc_mod_broadcom_param(dst->modifier);
+ uint32_t column_width_bytes, column_width, column_size;
size_t bpp = dst->plane_bpp[plane];
- size_t column_width = column_width_bytes * dst->plane_width[plane] /
- dst->width;
- size_t column_size = column_width_bytes * column_height;
unsigned int i, j;
+ switch (modifier_base) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND32:
+ column_width_bytes = 32;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ column_width_bytes = 64;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ column_width_bytes = 128;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ column_width_bytes = 256;
+ break;
+ default:
+ igt_assert(false);
+ }
+
+ column_width = column_width_bytes * dst->plane_width[plane] / dst->width;
+ column_size = column_width_bytes * column_height;
+
for (i = 0; i < dst->plane_height[plane]; i++) {
for (j = 0; j < src->plane_width[plane]; j++) {
size_t src_offset = src->offsets[plane];
@@ -346,19 +410,15 @@ static void vc4_fb_sand_tiled_convert_plane(struct igt_fb *dst, void *dst_buf,
}
}
-unsigned int vc4_fb_sand_tiled_convert(struct igt_fb *dst, struct igt_fb *src,
- uint64_t modifier)
+static void vc4_fb_convert_plane_from_sand_tiled(struct igt_fb *dst, void *dst_buf,
+ struct igt_fb *src, void *src_buf,
+ unsigned int plane)
{
- uint64_t modifier_base;
- size_t column_width_bytes;
- size_t column_height;
- unsigned int fb_id;
- unsigned int i;
- void *src_buf;
- void *dst_buf;
-
- modifier_base = fourcc_mod_broadcom_mod(modifier);
- column_height = fourcc_mod_broadcom_param(modifier);
+ uint64_t modifier_base = fourcc_mod_broadcom_mod(src->modifier);
+ uint32_t column_height = fourcc_mod_broadcom_param(src->modifier);
+ uint32_t column_width_bytes, column_width, column_size;
+ size_t bpp = src->plane_bpp[plane];
+ unsigned int i, j;
switch (modifier_base) {
case DRM_FORMAT_MOD_BROADCOM_SAND32:
@@ -377,41 +437,63 @@ unsigned int vc4_fb_sand_tiled_convert(struct igt_fb *dst, struct igt_fb *src,
igt_assert(false);
}
- fb_id = igt_create_fb(src->fd, src->width, src->height, src->drm_format,
- modifier, dst);
- igt_assert(fb_id > 0);
+ column_width = column_width_bytes * src->plane_width[plane] / src->width;
+ column_size = column_width_bytes * column_height;
+
+ for (i = 0; i < dst->plane_height[plane]; i++) {
+ for (j = 0; j < src->plane_width[plane]; j++) {
+ size_t src_offset = src->offsets[plane];
+ size_t dst_offset = dst->offsets[plane];
- src_buf = igt_fb_map_buffer(src->fd, src);
- igt_assert(src_buf);
+ src_offset += vc4_sand_tiled_offset(column_width,
+ column_size, j, i,
+ bpp);
+ dst_offset += dst->strides[plane] * i + j * bpp / 8;
- dst_buf = igt_fb_map_buffer(dst->fd, dst);
- igt_assert(dst_buf);
+ switch (bpp) {
+ case 8:
+ *(uint8_t *)(dst_buf + dst_offset) =
+ *(uint8_t *)(src_buf + src_offset);
+ break;
+ case 16:
+ *(uint16_t *)(dst_buf + dst_offset) =
+ *(uint16_t *)(src_buf + src_offset);
+ break;
+ default:
+ igt_assert(false);
+ }
+ }
+ }
+}
- for (i = 0; i < dst->num_planes; i++)
- vc4_fb_sand_tiled_convert_plane(dst, dst_buf, src, src_buf,
- column_width_bytes,
- column_height, i);
+void vc4_fb_convert_plane_to_tiled(struct igt_fb *dst, void *dst_buf,
+ struct igt_fb *src, void *src_buf)
+{
+ unsigned int plane;
- igt_fb_unmap_buffer(src, src_buf);
- igt_fb_unmap_buffer(dst, dst_buf);
+ igt_assert(src->modifier == DRM_FORMAT_MOD_LINEAR);
+ igt_assert(igt_vc4_is_tiled(dst->modifier));
- return fb_id;
+ for (plane = 0; plane < src->num_planes; plane++) {
+ if (dst->modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
+ vc4_fb_convert_plane_to_t_tiled(dst, dst_buf, src, src_buf, plane);
+ else
+ vc4_fb_convert_plane_to_sand_tiled(dst, dst_buf, src, src_buf, plane);
+ }
}
-size_t vc4_sand_tiled_offset(size_t column_width, size_t column_size, size_t x,
- size_t y, size_t bpp)
+void vc4_fb_convert_plane_from_tiled(struct igt_fb *dst, void *dst_buf,
+ struct igt_fb *src, void *src_buf)
{
- size_t offset = 0;
- size_t cols_x;
- size_t pix_x;
-
- /* Offset to the beginning of the relevant column. */
- cols_x = x / column_width;
- offset += cols_x * column_size;
+ unsigned int plane;
- /* Offset to the relevant pixel. */
- pix_x = x % column_width;
- offset += (column_width * y + pix_x) * bpp / 8;
+ igt_assert(igt_vc4_is_tiled(src->modifier));
+ igt_assert(dst->modifier == DRM_FORMAT_MOD_LINEAR);
- return offset;
+ for (plane = 0; plane < src->num_planes; plane++) {
+ if (src->modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
+ vc4_fb_convert_plane_from_t_tiled(dst, dst_buf, src, src_buf, plane);
+ else
+ vc4_fb_convert_plane_from_sand_tiled(dst, dst_buf, src, src_buf, plane);
+ }
}
diff --git a/lib/igt_vc4.h b/lib/igt_vc4.h
index a1781269..f32bf398 100644
--- a/lib/igt_vc4.h
+++ b/lib/igt_vc4.h
@@ -29,16 +29,14 @@ int igt_vc4_create_bo(int fd, size_t size);
void *igt_vc4_mmap_bo(int fd, uint32_t handle, uint32_t size, unsigned prot);
int igt_vc4_get_param(int fd, uint32_t param, uint64_t *val);
bool igt_vc4_purgeable_bo(int fd, int handle, bool purgeable);
+bool igt_vc4_is_tiled(uint64_t modifier);
void igt_vc4_set_tiling(int fd, uint32_t handle, uint64_t modifier);
uint64_t igt_vc4_get_tiling(int fd, uint32_t handle);
-unsigned int igt_vc4_fb_t_tiled_convert(struct igt_fb *dst, struct igt_fb *src);
-size_t igt_vc4_t_tiled_offset(size_t stride, size_t height, size_t bpp,
- size_t x, size_t y);
-unsigned int vc4_fb_sand_tiled_convert(struct igt_fb *dst, struct igt_fb *src,
- uint64_t modifier);
-size_t vc4_sand_tiled_offset(size_t column_width, size_t column_size, size_t x,
- size_t y, size_t bpp);
+void vc4_fb_convert_plane_to_tiled(struct igt_fb *dst, void *dst_buf,
+ struct igt_fb *src, void *src_buf);
+void vc4_fb_convert_plane_from_tiled(struct igt_fb *dst, void *dst_buf,
+ struct igt_fb *src, void *src_buf);
#endif /* IGT_VC4_H */
diff --git a/lib/intel_device_info.c b/lib/intel_device_info.c
index 3f51211f..88be2917 100644
--- a/lib/intel_device_info.c
+++ b/lib/intel_device_info.c
@@ -382,6 +382,8 @@ static const struct pci_id_match intel_device_match[] = {
INTEL_ICL_11_IDS(&intel_icelake_info),
+ INTEL_EHL_IDS(&intel_icelake_info),
+
INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),
};
diff --git a/lib/meson.build b/lib/meson.build
index e0b9cf51..157624e7 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -1,10 +1,12 @@
lib_sources = [
'drmtest.c',
'i915/gem_context.c',
+ 'i915/gem_engine_topology.c',
'i915/gem_scheduler.c',
'i915/gem_submission.c',
'i915/gem_ring.c',
'i915/gem_mman.c',
+ 'i915/gem_vm.c',
'igt_color_encoding.c',
'igt_debugfs.c',
'igt_device.c',
@@ -52,11 +54,13 @@ lib_sources = [
'igt_dummyload.c',
'uwildmat/uwildmat.c',
'igt_kmod.c',
+ 'igt_panfrost.c',
'igt_v3d.c',
'igt_vc4.c',
'igt_psr.c',
'igt_amd.c',
'igt_edid.c',
+ 'igt_eld.c',
]
lib_deps = [
@@ -67,7 +71,6 @@ lib_deps = [
libkmod,
libprocps,
libudev,
- libdw,
math,
pciaccess,
pixman,
@@ -103,7 +106,7 @@ if alsa.found()
lib_sources += 'igt_alsa.c'
endif
-if chamelium_found
+if chamelium.found()
lib_deps += chamelium
lib_sources += 'igt_chamelium.c'
lib_sources += 'igt_chamelium_stream.c'
@@ -135,7 +138,8 @@ lib_igt_build = shared_library('igt',
['dummy.c'],
link_whole: lib_intermediates,
dependencies: lib_deps,
- install : true
+ install : true,
+ soversion : '0',
)
lib_igt = declare_dependency(link_with : lib_igt_build,
diff --git a/lib/panfrost-job.h b/lib/panfrost-job.h
new file mode 100644
index 00000000..85ef02d0
--- /dev/null
+++ b/lib/panfrost-job.h
@@ -0,0 +1,1516 @@
+/*
+ * © Copyright 2017-2018 Alyssa Rosenzweig
+ * © Copyright 2017-2018 Connor Abbott
+ * © Copyright 2017-2018 Lyude Paul
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __PANFROST_JOB_H__
+#define __PANFROST_JOB_H__
+
+#include <stdint.h>
+#include <panfrost-misc.h>
+
+#define MALI_SHORT_PTR_BITS (sizeof(uintptr_t)*8)
+
+#define MALI_FBD_HIERARCHY_WEIGHTS 8
+
+#define MALI_PAYLOAD_SIZE 256
+
+typedef u32 mali_jd_core_req;
+
+enum mali_job_type {
+ JOB_NOT_STARTED = 0,
+ JOB_TYPE_NULL = 1,
+ JOB_TYPE_SET_VALUE = 2,
+ JOB_TYPE_CACHE_FLUSH = 3,
+ JOB_TYPE_COMPUTE = 4,
+ JOB_TYPE_VERTEX = 5,
+ JOB_TYPE_GEOMETRY = 6,
+ JOB_TYPE_TILER = 7,
+ JOB_TYPE_FUSED = 8,
+ JOB_TYPE_FRAGMENT = 9,
+};
+
+enum mali_draw_mode {
+ MALI_DRAW_NONE = 0x0,
+ MALI_POINTS = 0x1,
+ MALI_LINES = 0x2,
+ MALI_LINE_STRIP = 0x4,
+ MALI_LINE_LOOP = 0x6,
+ MALI_TRIANGLES = 0x8,
+ MALI_TRIANGLE_STRIP = 0xA,
+ MALI_TRIANGLE_FAN = 0xC,
+ MALI_POLYGON = 0xD,
+ MALI_QUADS = 0xE,
+ MALI_QUAD_STRIP = 0xF,
+
+ /* All other modes invalid */
+};
+
+/* Applies to tiler_gl_enables */
+
+
+#define MALI_OCCLUSION_QUERY (1 << 3)
+#define MALI_OCCLUSION_PRECISE (1 << 4)
+
+#define MALI_FRONT_FACE(v) (v << 5)
+#define MALI_CCW (0)
+#define MALI_CW (1)
+
+#define MALI_CULL_FACE_FRONT (1 << 6)
+#define MALI_CULL_FACE_BACK (1 << 7)
+
+/* TODO: Might this actually be a finer bitfield? */
+#define MALI_DEPTH_STENCIL_ENABLE 0x6400
+
+#define DS_ENABLE(field) \
+ (field == MALI_DEPTH_STENCIL_ENABLE) \
+ ? "MALI_DEPTH_STENCIL_ENABLE" \
+ : (field == 0) ? "0" \
+ : "0 /* XXX: Unknown, check hexdump */"
+
+/* Used in stencil and depth tests */
+
+enum mali_func {
+ MALI_FUNC_NEVER = 0,
+ MALI_FUNC_LESS = 1,
+ MALI_FUNC_EQUAL = 2,
+ MALI_FUNC_LEQUAL = 3,
+ MALI_FUNC_GREATER = 4,
+ MALI_FUNC_NOTEQUAL = 5,
+ MALI_FUNC_GEQUAL = 6,
+ MALI_FUNC_ALWAYS = 7
+};
+
+/* Same as OpenGL, but mixed up. Why? Because forget me, that's why! */
+
+enum mali_alt_func {
+ MALI_ALT_FUNC_NEVER = 0,
+ MALI_ALT_FUNC_GREATER = 1,
+ MALI_ALT_FUNC_EQUAL = 2,
+ MALI_ALT_FUNC_GEQUAL = 3,
+ MALI_ALT_FUNC_LESS = 4,
+ MALI_ALT_FUNC_NOTEQUAL = 5,
+ MALI_ALT_FUNC_LEQUAL = 6,
+ MALI_ALT_FUNC_ALWAYS = 7
+};
+
+/* Flags apply to unknown2_3? */
+
+#define MALI_HAS_MSAA (1 << 0)
+#define MALI_CAN_DISCARD (1 << 5)
+
+/* Applies on SFBD systems, specifying that programmable blending is in use */
+#define MALI_HAS_BLEND_SHADER (1 << 6)
+
+/* func is mali_func */
+#define MALI_DEPTH_FUNC(func) (func << 8)
+#define MALI_GET_DEPTH_FUNC(flags) ((flags >> 8) & 0x7)
+#define MALI_DEPTH_FUNC_MASK MALI_DEPTH_FUNC(0x7)
+
+#define MALI_DEPTH_TEST (1 << 11)
+
+/* Next flags to unknown2_4 */
+#define MALI_STENCIL_TEST (1 << 0)
+
+/* What?! */
+#define MALI_SAMPLE_ALPHA_TO_COVERAGE_NO_BLEND_SHADER (1 << 1)
+
+#define MALI_NO_DITHER (1 << 9)
+#define MALI_DEPTH_RANGE_A (1 << 12)
+#define MALI_DEPTH_RANGE_B (1 << 13)
+#define MALI_NO_MSAA (1 << 14)
+
+/* Stencil test state is all encoded in a single u32, just with a lot of
+ * enums... */
+
+enum mali_stencil_op {
+ MALI_STENCIL_KEEP = 0,
+ MALI_STENCIL_REPLACE = 1,
+ MALI_STENCIL_ZERO = 2,
+ MALI_STENCIL_INVERT = 3,
+ MALI_STENCIL_INCR_WRAP = 4,
+ MALI_STENCIL_DECR_WRAP = 5,
+ MALI_STENCIL_INCR = 6,
+ MALI_STENCIL_DECR = 7
+};
+
+struct mali_stencil_test {
+ unsigned ref : 8;
+ unsigned mask : 8;
+ enum mali_func func : 3;
+ enum mali_stencil_op sfail : 3;
+ enum mali_stencil_op dpfail : 3;
+ enum mali_stencil_op dppass : 3;
+ unsigned zero : 4;
+} __attribute__((packed));
+
+/* Blending is a mess, since anything fancy triggers a blend shader, and
+ * -those- are not understood whatsoever yet */
+
+#define MALI_MASK_R (1 << 0)
+#define MALI_MASK_G (1 << 1)
+#define MALI_MASK_B (1 << 2)
+#define MALI_MASK_A (1 << 3)
+
+enum mali_nondominant_mode {
+ MALI_BLEND_NON_MIRROR = 0,
+ MALI_BLEND_NON_ZERO = 1
+};
+
+enum mali_dominant_blend {
+ MALI_BLEND_DOM_SOURCE = 0,
+ MALI_BLEND_DOM_DESTINATION = 1
+};
+
+enum mali_dominant_factor {
+ MALI_DOMINANT_UNK0 = 0,
+ MALI_DOMINANT_ZERO = 1,
+ MALI_DOMINANT_SRC_COLOR = 2,
+ MALI_DOMINANT_DST_COLOR = 3,
+ MALI_DOMINANT_UNK4 = 4,
+ MALI_DOMINANT_SRC_ALPHA = 5,
+ MALI_DOMINANT_DST_ALPHA = 6,
+ MALI_DOMINANT_CONSTANT = 7,
+};
+
+enum mali_blend_modifier {
+ MALI_BLEND_MOD_UNK0 = 0,
+ MALI_BLEND_MOD_NORMAL = 1,
+ MALI_BLEND_MOD_SOURCE_ONE = 2,
+ MALI_BLEND_MOD_DEST_ONE = 3,
+};
+
+struct mali_blend_mode {
+ enum mali_blend_modifier clip_modifier : 2;
+ unsigned unused_0 : 1;
+ unsigned negate_source : 1;
+
+ enum mali_dominant_blend dominant : 1;
+
+ enum mali_nondominant_mode nondominant_mode : 1;
+
+ unsigned unused_1 : 1;
+
+ unsigned negate_dest : 1;
+
+ enum mali_dominant_factor dominant_factor : 3;
+ unsigned complement_dominant : 1;
+} __attribute__((packed));
+
+struct mali_blend_equation {
+ /* Of type mali_blend_mode */
+ unsigned rgb_mode : 12;
+ unsigned alpha_mode : 12;
+
+ unsigned zero1 : 4;
+
+ /* Corresponds to MALI_MASK_* above and glColorMask arguments */
+
+ unsigned color_mask : 4;
+
+ /* Attached constant for CONSTANT_ALPHA, etc */
+
+#ifndef BIFROST
+ float constant;
+#endif
+} __attribute__((packed));
+
+/* Used with channel swizzling */
+enum mali_channel {
+ MALI_CHANNEL_RED = 0,
+ MALI_CHANNEL_GREEN = 1,
+ MALI_CHANNEL_BLUE = 2,
+ MALI_CHANNEL_ALPHA = 3,
+ MALI_CHANNEL_ZERO = 4,
+ MALI_CHANNEL_ONE = 5,
+ MALI_CHANNEL_RESERVED_0 = 6,
+ MALI_CHANNEL_RESERVED_1 = 7,
+};
+
+struct mali_channel_swizzle {
+ enum mali_channel r : 3;
+ enum mali_channel g : 3;
+ enum mali_channel b : 3;
+ enum mali_channel a : 3;
+} __attribute__((packed));
+
+/* Compressed per-pixel formats. Each of these formats expands to one to four
+ * floating-point or integer numbers, as defined by the OpenGL specification.
+ * There are various places in OpenGL where the user can specify a compressed
+ * format in memory, which all use the same 8-bit enum in the various
+ * descriptors, although different hardware units support different formats.
+ */
+
+/* The top 3 bits specify how the bits of each component are interpreted. */
+
+/* e.g. R11F_G11F_B10F */
+#define MALI_FORMAT_SPECIAL (2 << 5)
+
+/* signed normalized, e.g. RGBA8_SNORM */
+#define MALI_FORMAT_SNORM (3 << 5)
+
+/* e.g. RGBA8UI */
+#define MALI_FORMAT_UINT (4 << 5)
+
+/* e.g. RGBA8 and RGBA32F */
+#define MALI_FORMAT_UNORM (5 << 5)
+
+/* e.g. RGBA8I and RGBA16F */
+#define MALI_FORMAT_SINT (6 << 5)
+
+/* These formats seem to largely duplicate the others. They're used at least
+ * for Bifrost framebuffer output.
+ */
+#define MALI_FORMAT_SPECIAL2 (7 << 5)
+
+/* If the high 3 bits are 3 to 6, these two bits say how many components
+ * there are.
+ */
+#define MALI_NR_CHANNELS(n) ((n - 1) << 3)
+
+/* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
+ * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
+ * bits mean.
+ */
+
+#define MALI_CHANNEL_4 2
+
+#define MALI_CHANNEL_8 3
+
+#define MALI_CHANNEL_16 4
+
+#define MALI_CHANNEL_32 5
+
+/* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
+ * MALI_FORMAT_UNORM, it means a 32-bit float.
+ */
+#define MALI_CHANNEL_FLOAT 7
+
+enum mali_format {
+ MALI_RGB565 = MALI_FORMAT_SPECIAL | 0x0,
+ MALI_RGB5_A1_UNORM = MALI_FORMAT_SPECIAL | 0x2,
+ MALI_RGB10_A2_UNORM = MALI_FORMAT_SPECIAL | 0x3,
+ MALI_RGB10_A2_SNORM = MALI_FORMAT_SPECIAL | 0x5,
+ MALI_RGB10_A2UI = MALI_FORMAT_SPECIAL | 0x7,
+ MALI_RGB10_A2I = MALI_FORMAT_SPECIAL | 0x9,
+
+ /* YUV formats */
+ MALI_NV12 = MALI_FORMAT_SPECIAL | 0xc,
+
+ MALI_Z32_UNORM = MALI_FORMAT_SPECIAL | 0xD,
+ MALI_R32_FIXED = MALI_FORMAT_SPECIAL | 0x11,
+ MALI_RG32_FIXED = MALI_FORMAT_SPECIAL | 0x12,
+ MALI_RGB32_FIXED = MALI_FORMAT_SPECIAL | 0x13,
+ MALI_RGBA32_FIXED = MALI_FORMAT_SPECIAL | 0x14,
+ MALI_R11F_G11F_B10F = MALI_FORMAT_SPECIAL | 0x19,
+ /* Only used for varyings, to indicate the transformed gl_Position */
+ MALI_VARYING_POS = MALI_FORMAT_SPECIAL | 0x1e,
+ /* Only used for varyings, to indicate that the write should be
+ * discarded.
+ */
+ MALI_VARYING_DISCARD = MALI_FORMAT_SPECIAL | 0x1f,
+
+ MALI_R8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
+ MALI_R16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
+ MALI_R32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
+ MALI_RG8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
+ MALI_RG16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
+ MALI_RG32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
+ MALI_RGB8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
+ MALI_RGB16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
+ MALI_RGB32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
+ MALI_RGBA8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
+ MALI_RGBA16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
+ MALI_RGBA32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
+
+ MALI_R8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
+ MALI_R16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
+ MALI_R32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
+ MALI_RG8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
+ MALI_RG16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
+ MALI_RG32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
+ MALI_RGB8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
+ MALI_RGB16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
+ MALI_RGB32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
+ MALI_RGBA8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
+ MALI_RGBA16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
+ MALI_RGBA32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
+
+ MALI_R8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
+ MALI_R16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
+ MALI_R32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
+ MALI_R32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT,
+ MALI_RG8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
+ MALI_RG16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
+ MALI_RG32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
+ MALI_RG32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT,
+ MALI_RGB8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
+ MALI_RGB16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
+ MALI_RGB32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
+ MALI_RGB32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT,
+ MALI_RGBA4_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_4,
+ MALI_RGBA8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
+ MALI_RGBA16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
+ MALI_RGBA32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
+ MALI_RGBA32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT,
+
+ MALI_R8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
+ MALI_R16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
+ MALI_R32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
+ MALI_R16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT,
+ MALI_RG8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
+ MALI_RG16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
+ MALI_RG32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
+ MALI_RG16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT,
+ MALI_RGB8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
+ MALI_RGB16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
+ MALI_RGB32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
+ MALI_RGB16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT,
+ MALI_RGBA8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
+ MALI_RGBA16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
+ MALI_RGBA32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
+ MALI_RGBA16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT,
+
+ MALI_RGBA4 = MALI_FORMAT_SPECIAL2 | 0x8,
+ MALI_RGBA8_2 = MALI_FORMAT_SPECIAL2 | 0xd,
+ MALI_RGB10_A2_2 = MALI_FORMAT_SPECIAL2 | 0xe,
+};
+
+
+/* Alpha coverage is encoded as 4-bits (from a clampf), with inversion
+ * literally performing a bitwise invert. This function produces slightly wrong
+ * results and I'm not sure why; some rounding issue I suppose... */
+
+#define MALI_ALPHA_COVERAGE(clampf) ((uint16_t) (int) (clampf * 15.0f))
+#define MALI_GET_ALPHA_COVERAGE(nibble) ((float) nibble / 15.0f)
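+
+/* Worked example of the encoding above; this helper is purely illustrative
+ * (an assumption, not lifted from the blob). Full coverage (1.0f) encodes to
+ * 0xf, and the "inversion" mentioned above is just a bitwise NOT of that
+ * nibble.
+ */
+static inline uint16_t
+mali_alpha_coverage_inverted(float clampf)
+{
+ return ~MALI_ALPHA_COVERAGE(clampf);
+}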
+
+/* Applies to unknown1 */
+#define MALI_NO_ALPHA_TO_COVERAGE (1 << 10)
+
+struct mali_blend_meta {
+#ifndef BIFROST
+ /* Base value of 0x200.
+ * OR with 0x1 for blending (anything other than REPLACE).
+ * OR with 0x2 for programmable blending
+ */
+
+ u64 unk1;
+
+ /* For programmable blending, these turn into the blend_shader address */
+ struct mali_blend_equation blend_equation_1;
+
+ u64 zero2;
+ struct mali_blend_equation blend_equation_2;
+#else
+ u32 unk1; // = 0x200
+ struct mali_blend_equation blend_equation;
+ /*
+ * - 0x19 normally
+ * - 0x3 when this slot is unused (everything else is 0 except the index)
+ * - 0x11 when this is the fourth slot (and it's used)
+ * - 0 when there is a blend shader
+ */
+ u16 unk2;
+ /* increments from 0 to 3 */
+ u16 index;
+
+ union {
+ struct {
+ /* So far, I've only seen:
+ * - R001 for 1-component formats
+ * - RG01 for 2-component formats
+ * - RGB1 for 3-component formats
+ * - RGBA for 4-component formats
+ */
+ u32 swizzle : 12;
+ enum mali_format format : 8;
+
+ /* Type of the shader output variable. Note, this can
+ * be different from the format.
+ *
+ * 0: f16 (mediump float)
+ * 1: f32 (highp float)
+ * 2: i32 (highp int)
+ * 3: u32 (highp uint)
+ * 4: i16 (mediump int)
+ * 5: u16 (mediump uint)
+ */
+ u32 shader_type : 3;
+ u32 zero : 9;
+ };
+
+ /* Only the low 32 bits of the blend shader are stored, the
+ * high 32 bits are implicitly the same as the original shader.
+ * According to the kernel driver, the program counter for
+ * shaders is actually only 24 bits, so shaders cannot cross
+ * the 2^24-byte boundary, and neither can the blend shader.
+ * The blob handles this by allocating a 2^24 byte pool for
+ * shaders, and making sure that any blend shaders are stored
+ * in the same pool as the original shader. The kernel will
+ * make sure this allocation is aligned to 2^24 bytes.
+ */
+ u32 blend_shader;
+ };
+#endif
+} __attribute__((packed));
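+
+/* Illustrative sketch (the helper name is made up, not taken from the blob):
+ * reconstruct the full blend shader address from the truncated Bifrost
+ * field, assuming, per the comment above, that its high 32 bits match the
+ * original shader it was allocated next to.
+ */
+static inline mali_ptr
+mali_blend_shader_address(mali_ptr original_shader, u32 blend_shader)
+{
+ return (original_shader & ~(mali_ptr) 0xffffffff) | blend_shader;
+}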
+
+struct mali_shader_meta {
+ mali_ptr shader;
+ u16 texture_count;
+ u16 sampler_count;
+ u16 attribute_count;
+ u16 varying_count;
+
+ union {
+ struct {
+ u32 uniform_buffer_count : 4;
+ u32 unk1 : 28; // = 0x800000 for vertex, 0x958020 for tiler
+ } bifrost1;
+ struct {
+ /* 0x200 except MALI_NO_ALPHA_TO_COVERAGE. Mysterious 1
+ * other times. Who knows really? */
+ u16 unknown1;
+
+ /* Whole number of uniform registers used, times two;
+ * whole number of work registers used (no scale).
+ */
+ unsigned work_count : 5;
+ unsigned uniform_count : 5;
+ unsigned unknown2 : 6;
+ } midgard1;
+ };
+
+ /* On bifrost: Exactly the same as glPolygonOffset() for both.
+ * On midgard: Depth factor is exactly as passed to glPolygonOffset.
+ * Depth units is equal to the value passed to glDepthOffset + 1.0f
+ * (use MALI_NEGATIVE)
+ */
+ float depth_units;
+ float depth_factor;
+
+ u32 unknown2_2;
+
+ u16 alpha_coverage;
+ u16 unknown2_3;
+
+ u8 stencil_mask_front;
+ u8 stencil_mask_back;
+ u16 unknown2_4;
+
+ struct mali_stencil_test stencil_front;
+ struct mali_stencil_test stencil_back;
+
+ union {
+ struct {
+ u32 unk3 : 7;
+ /* On Bifrost, some system values are preloaded in
+ * registers R55-R62 by the thread dispatcher prior to
+ * the start of shader execution. This is a bitfield
+ * with one entry for each register saying which
+ * registers need to be preloaded. Right now, the known
+ * values are:
+ *
+ * Vertex/compute:
+ * - R55 : gl_LocalInvocationID.xy
+ * - R56 : gl_LocalInvocationID.z + unknown in high 16 bits
+ * - R57 : gl_WorkGroupID.x
+ * - R58 : gl_WorkGroupID.y
+ * - R59 : gl_WorkGroupID.z
+ * - R60 : gl_GlobalInvocationID.x
+ * - R61 : gl_GlobalInvocationID.y/gl_VertexID (without base)
+ * - R62 : gl_GlobalInvocationID.z/gl_InstanceID (without base)
+ *
+ * Fragment:
+ * - R55 : unknown, never seen (but the bit for this is
+ * always set?)
+ * - R56 : unknown (bit always unset)
+ * - R57 : gl_PrimitiveID
+ * - R58 : gl_FrontFacing in low bit, potentially other stuff
+ * - R59 : u16 fragment coordinates (used to compute
+ * gl_FragCoord.xy, together with sample positions)
+ * - R60 : gl_SampleMask (used in epilog, so pretty
+ * much always used, but the bit is always 0 -- is
+ * this just always pushed?)
+ * - R61 : gl_SampleMaskIn and gl_SampleID, used by
+ * varying interpolation.
+ * - R62 : unknown (bit always unset).
+ */
+ u32 preload_regs : 8;
+ /* In units of 8 bytes or 64 bits, since the
+ * uniform/const port loads 64 bits at a time.
+ */
+ u32 uniform_count : 7;
+ u32 unk4 : 10; // = 2
+ } bifrost2;
+ struct {
+ u32 unknown2_7;
+ } midgard2;
+ };
+
+ /* zero on bifrost */
+ u32 unknown2_8;
+
+ /* Blending information for the older non-MRT Midgard HW. Check for
+ * MALI_HAS_BLEND_SHADER to decide how to interpret.
+ */
+
+ union {
+ mali_ptr blend_shader;
+ struct mali_blend_equation blend_equation;
+ };
+
+ /* There can be up to 4 blend_meta's. None of them are required for
+ * vertex shaders or the non-MRT case for Midgard (so the blob doesn't
+ * allocate any space).
+ */
+ struct mali_blend_meta blend_meta[];
+
+} __attribute__((packed));
+
+/* This only concerns hardware jobs */
+
+/* Possible values for job_descriptor_size */
+
+#define MALI_JOB_32 0
+#define MALI_JOB_64 1
+
+struct mali_job_descriptor_header {
+ u32 exception_status;
+ u32 first_incomplete_task;
+ u64 fault_pointer;
+ u8 job_descriptor_size : 1;
+ enum mali_job_type job_type : 7;
+ u8 job_barrier : 1;
+ u8 unknown_flags : 7;
+ u16 job_index;
+ u16 job_dependency_index_1;
+ u16 job_dependency_index_2;
+
+ union {
+ u64 next_job_64;
+ u32 next_job_32;
+ };
+} __attribute__((packed));
+
+struct mali_payload_set_value {
+ u64 out;
+ u64 unknown;
+} __attribute__((packed));
+
+/* Special attributes have a fixed index */
+#define MALI_SPECIAL_ATTRIBUTE_BASE 16
+#define MALI_VERTEX_ID (MALI_SPECIAL_ATTRIBUTE_BASE + 0)
+#define MALI_INSTANCE_ID (MALI_SPECIAL_ATTRIBUTE_BASE + 1)
+
+/*
+ * Mali Attributes
+ *
+ * This structure lets the attribute unit compute the address of an attribute
+ * given the vertex and instance ID. Unfortunately, the way this works is
+ * rather complicated when instancing is enabled.
+ *
+ * To explain this, first we need to explain how compute and vertex threads are
+ * dispatched. This is a guess (although a pretty firm guess!) since the
+ * details are mostly hidden from the driver, except for attribute instancing.
+ * When a quad is dispatched, it receives a single, linear index. However, we
+ * need to translate that index into a (vertex id, instance id) pair, or a
+ * (local id x, local id y, local id z) triple for compute shaders (although
+ * vertex shaders and compute shaders are handled almost identically).
+ * Focusing on vertex shaders, one option would be to do:
+ *
+ * vertex_id = linear_id % num_vertices
+ * instance_id = linear_id / num_vertices
+ *
+ * but this involves a costly division and modulus by an arbitrary number.
+ * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
+ * num_instances threads instead of num_vertices * num_instances, which results
+ * in some "extra" threads with vertex_id >= num_vertices, which we have to
+ * discard. The more we pad num_vertices, the more "wasted" threads we
+ * dispatch, but the division is potentially easier.
+ *
+ * One straightforward choice is to pad num_vertices to the next power of two,
+ * which means that the division and modulus are just simple bit shifts and
+ * masking. But the actual algorithm is a bit more complicated. The thread
+ * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
+ * to dividing by a power of two. This is possibly using the technique
+ * described in patent US20170010862A1. As a result, padded_num_vertices can be
+ * 1, 3, 5, 7, or 9 times a power of two. This results in fewer wasted threads,
+ * since we need less padding.
+ *
+ * padded_num_vertices is picked by the hardware. The driver just specifies the
+ * actual number of vertices. At least for Mali G71, the first few cases are
+ * given by:
+ *
+ * num_vertices | padded_num_vertices
+ * 3 | 4
+ * 4-7 | 8
+ * 8-11 | 12 (3 * 4)
+ * 12-15 | 16
+ * 16-19 | 20 (5 * 4)
+ *
+ * Note that padded_num_vertices is a multiple of four (presumably because
+ * threads are dispatched in groups of 4). Also, padded_num_vertices is always
+ * at least one more than num_vertices, which seems like a quirk of the
+ * hardware. For larger num_vertices, the hardware uses the following
+ * algorithm: using the binary representation of num_vertices, we look at the
+ * most significant set bit as well as the following 3 bits. Let n be the
+ * number of bits after those 4 bits. Then we set padded_num_vertices according
+ * to the following table:
+ *
+ * high bits | padded_num_vertices
+ * 1000 | 9 * 2^n
+ * 1001 | 5 * 2^(n+1)
+ * 101x | 3 * 2^(n+2)
+ * 110x | 7 * 2^(n+1)
+ * 111x | 2^(n+4)
+ *
+ * For example, if num_vertices = 70 is passed to glDraw(), its binary
+ * representation is 1000110, so n = 3 and the high bits are 1000, and
+ * therefore padded_num_vertices = 9 * 2^3 = 72.
+ *
+ * The attribute unit works in terms of the original linear_id. If
+ * num_instances = 1, then they are the same, and everything is simple.
+ * However, with instancing things get more complicated. There are four
+ * possible modes, two of them we can group together:
+ *
+ * 1. Use the linear_id directly. Only used when there is no instancing.
+ *
+ * 2. Use the linear_id modulo a constant. This is used for per-vertex
+ * attributes with instancing enabled by making the constant equal
+ * padded_num_vertices. Because the modulus is always padded_num_vertices, this
+ * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
+ * The shift field specifies the power of two, while the extra_flags field
+ * specifies the odd number. If shift = n and extra_flags = m, then the modulus
+ * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
+ * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
+ * shift = 3. Note that we must exactly follow the hardware algorithm used to
+ * get padded_num_vertices in order to correctly implement per-vertex
+ * attributes.
+ *
+ * 3. Divide the linear_id by a constant. In order to correctly implement
+ * instance divisors, we have to divide linear_id by padded_num_vertices times
+ * the user-specified divisor. So first we compute padded_num_vertices, again
+ * following the exact same algorithm that the hardware uses, then multiply it
+ * by the GL-level divisor to get the hardware-level divisor. This case is
+ * further divided into two more cases. If the hardware-level divisor is a
+ * power of two, then we just need to shift. The shift amount is specified by
+ * the shift field, so that the hardware-level divisor is just 2^shift.
+ *
+ * If it isn't a power of two, then we have to divide by an arbitrary integer.
+ * For that, we use the well-known technique of multiplying by an approximation
+ * of the inverse. The driver must compute the magic multiplier and shift
+ * amount, and then the hardware does the multiplication and shift. The
+ * hardware and driver also use the "round-down" optimization as described in
+ * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
+ * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
+ * high bit is implicitly set to 1 even though it is set to 0 by the driver --
+ * presumably this simplifies the hardware multiplier a little. The hardware
+ * first multiplies linear_id by the multiplier and takes the high 32 bits,
+ * then applies the round-down correction if extra_flags = 1, then finally
+ * shifts right by the shift field.
+ *
+ * There are some differences between ridiculousfish's algorithm and the Mali
+ * hardware algorithm, which means that the reference code from ridiculousfish
+ * doesn't always produce the right constants. Mali does not use the pre-shift
+ * optimization, since that would make a hardware implementation slower (it
+ * would have to always do the pre-shift, multiply, and post-shift operations).
+ * It also forces the multiplier to be at least 2^31, which means that the
+ * exponent is entirely fixed, so there is no trial-and-error. Altogether,
+ * given the divisor d, the algorithm the driver must follow is:
+ *
+ * 1. Set shift = floor(log2(d)).
+ * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
+ * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
+ * magic_divisor = m - 1 and extra_flags = 1.
+ * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
+ */
+
+enum mali_attr_mode {
+ MALI_ATTR_UNUSED = 0,
+ MALI_ATTR_LINEAR = 1,
+ MALI_ATTR_POT_DIVIDE = 2,
+ MALI_ATTR_MODULO = 3,
+ MALI_ATTR_NPOT_DIVIDE = 4,
+};
+
+union mali_attr {
+ /* This is used for actual attributes. */
+ struct {
+ /* The bottom 3 bits are the mode */
+ mali_ptr elements : 64 - 8;
+ u32 shift : 5;
+ u32 extra_flags : 3;
+ u32 stride;
+ u32 size;
+ };
+ /* The entry after an NPOT_DIVIDE entry has this format. It stores
+ * extra information that wouldn't fit in a normal entry.
+ */
+ struct {
+ u32 unk; /* = 0x20 */
+ u32 magic_divisor;
+ u32 zero;
+ /* This is the original, GL-level divisor. */
+ u32 divisor;
+ };
+} __attribute__((packed));
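+
+/* Illustrative sketch of the MALI_ATTR_NPOT_DIVIDE constant derivation in
+ * steps 1-4 above; the helper name is made up for illustration. d is the
+ * hardware-level divisor (padded_num_vertices times the GL divisor), assumed
+ * nonzero and not a power of two (MALI_ATTR_POT_DIVIDE covers that case).
+ */
+static inline void
+mali_compute_npot_divide(u32 d, u32 *shift, u32 *magic_divisor,
+                         u32 *extra_flags)
+{
+ u64 m, e;
+ u32 s = 0;
+
+ /* 1. shift = floor(log2(d)) */
+ while ((2ull << s) <= d)
+  s++;
+
+ /* 2. m = ceil(2^(shift + 32) / d), e = 2^(shift + 32) % d */
+ e = (1ull << (s + 32)) % d;
+ m = (1ull << (s + 32)) / d + (e ? 1 : 0);
+
+ /* 3./4. Apply the round-down correction when e <= 2^shift */
+ if (e <= (1ull << s)) {
+  *magic_divisor = m - 1;
+  *extra_flags = 1;
+ } else {
+  *magic_divisor = m;
+  *extra_flags = 0;
+ }
+
+ *shift = s;
+}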
+
+struct mali_attr_meta {
+ /* Vertex buffer index */
+ u8 index;
+
+ unsigned unknown1 : 2;
+ unsigned swizzle : 12;
+ enum mali_format format : 8;
+
+ /* Always observed to be zero at the moment */
+ unsigned unknown3 : 2;
+
+ /* When packing multiple attributes in a buffer, offset addresses by this value */
+ uint32_t src_offset;
+} __attribute__((packed));
+
+enum mali_fbd_type {
+ MALI_SFBD = 0,
+ MALI_MFBD = 1,
+};
+
+#define FBD_TYPE (1)
+#define FBD_MASK (~0x3f)
+
+struct mali_uniform_buffer_meta {
+ /* This is actually the size minus 1 (MALI_POSITIVE), in units of 16
+ * bytes. This gives a maximum of 2^14 bytes, which just so happens to
+ * be the GL minimum-maximum for GL_MAX_UNIFORM_BLOCK_SIZE.
+ */
+ u64 size : 10;
+
+ /* This is missing the bottom 2 bits and top 8 bits. The top 8 bits
+ * should be 0 for userspace pointers, according to
+ * https://lwn.net/Articles/718895/. By reusing these bits, we can make
+ * each entry in the table only 64 bits.
+ */
+ mali_ptr ptr : 64 - 10;
+};
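+
+/* Illustrative sketch (helper name and exact packing are assumptions): build
+ * a uniform buffer entry from a GPU address and a size in bytes. The size is
+ * stored off-by-one in 16-byte units; the pointer drops its two low bits (so
+ * the buffer must be at least 4-byte aligned) and its top eight bits, as
+ * noted above.
+ */
+static inline struct mali_uniform_buffer_meta
+mali_make_uniform_buffer(mali_ptr address, u32 size_bytes)
+{
+ struct mali_uniform_buffer_meta ubo = {
+  .size = size_bytes / 16 - 1,
+  .ptr = address >> 2,
+ };
+
+ return ubo;
+}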
+
+/* On Bifrost, these fields are the same between the vertex and tiler payloads.
+ * They also seem to be the same between Bifrost and Midgard. They're shared in
+ * fused payloads.
+ */
+
+/* Applies to unknown_draw */
+
+#define MALI_DRAW_INDEXED_UINT8 (0x10)
+#define MALI_DRAW_INDEXED_UINT16 (0x20)
+#define MALI_DRAW_INDEXED_UINT32 (0x30)
+#define MALI_DRAW_VARYING_SIZE (0x100)
+#define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
+
+struct mali_vertex_tiler_prefix {
+ /* This is a dynamic bitfield containing the following things in this order:
+ *
+ * - gl_WorkGroupSize.x
+ * - gl_WorkGroupSize.y
+ * - gl_WorkGroupSize.z
+ * - gl_NumWorkGroups.x
+ * - gl_NumWorkGroups.y
+ * - gl_NumWorkGroups.z
+ *
+ * The number of bits allocated for each number is based on the *_shift
+ * fields below. For example, workgroups_y_shift gives the bit that
+ * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
+ * that gl_NumWorkGroups.z starts at (and therefore one after the bit
+ * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
+ * value is one more than the stored value, since if any of the values
+ * are zero, then there would be no invocations (and hence no job). If
+ * there were 0 bits allocated to a given field, then it must be zero,
+ * and hence the real value is one.
+ *
+ * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
+ * effectively doing glDispatchCompute(1, vertex_count, instance_count)
+ * where vertex count is the number of vertices.
+ */
+ u32 invocation_count;
+
+ u32 size_y_shift : 5;
+ u32 size_z_shift : 5;
+ u32 workgroups_x_shift : 6;
+ u32 workgroups_y_shift : 6;
+ u32 workgroups_z_shift : 6;
+ /* This is max(workgroups_x_shift, 2) in all the cases I've seen. */
+ u32 workgroups_x_shift_2 : 4;
+
+ u32 draw_mode : 4;
+ u32 unknown_draw : 22;
+
+ /* This is the same as workgroups_x_shift_2 in compute shaders, but
+ * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
+ * something to do with how many quads get put in the same execution
+ * engine, which is a balance (you don't want to starve the engine, but
+ * you also want to distribute work evenly).
+ */
+ u32 workgroups_x_shift_3 : 6;
+
+
+ /* Negative of draw_start for TILER jobs from what I've seen */
+ int32_t negative_start;
+ u32 zero1;
+
+ /* Like many other strictly nonzero quantities, index_count is
+ * subtracted by one. For an indexed cube, this is equal to 35 = 6
+ * faces * 2 triangles per face * 3 vertices per triangle - 1. That is,
+ * for an indexed draw, index_count is the number of actual vertices
+ * rendered whereas invocation_count is the number of unique vertices
+ * rendered (the number of times the vertex shader must be invoked).
+ * For non-indexed draws, this is just equal to invocation_count. */
+
+ u32 index_count;
+
+ /* No hidden structure; literally just a pointer to an array of uint
+ * indices (width depends on flags). Thanks, guys, for not making my
+ * life insane for once! NULL for non-indexed draws. */
+
+ uintptr_t indices;
+} __attribute__((packed));
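+
+/* A guess at how a driver would fill invocation_count and the *_shift fields
+ * described above; the helper name is made up for illustration and assumes
+ * every count is at least one and that the packed value fits in 32 bits.
+ * Each quantity is stored off-by-one and packed back to back, using only as
+ * many bits as its stored value needs (zero bits for a stored zero), with
+ * each *_shift recording where the next field starts.
+ */
+static inline void
+mali_pack_invocation_count(struct mali_vertex_tiler_prefix *prefix,
+                           u32 size_x, u32 size_y, u32 size_z,
+                           u32 groups_x, u32 groups_y, u32 groups_z)
+{
+ u32 values[6] = { size_x, size_y, size_z, groups_x, groups_y, groups_z };
+ u32 shifts[6];
+ u32 packed = 0, shift = 0;
+ unsigned int i;
+
+ for (i = 0; i < 6; i++) {
+  u32 v = values[i] - 1; /* MALI_POSITIVE, defined further down */
+
+  shifts[i] = shift;
+  packed |= v << shift;
+
+  /* Grow the field until it can hold the stored value */
+  while ((1ull << (shift - shifts[i])) <= v)
+   shift++;
+ }
+
+ prefix->invocation_count = packed;
+ prefix->size_y_shift = shifts[1];
+ prefix->size_z_shift = shifts[2];
+ prefix->workgroups_x_shift = shifts[3];
+ prefix->workgroups_y_shift = shifts[4];
+ prefix->workgroups_z_shift = shifts[5];
+ /* Observed above to be max(workgroups_x_shift, 2) */
+ prefix->workgroups_x_shift_2 = shifts[3] > 2 ? shifts[3] : 2;
+}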
+
+/* Point size / line width can either be specified as a 32-bit float (for
+ * constant size) or as a [machine word size]-bit GPU pointer (for varying size). If a pointer
+ * is selected (by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
+ * payload), the contents of varying_pointer will be interpreted as an array of
+ * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
+ * creating a special MALI_R16F varying that writes to varying_pointer. */
+
+union midgard_primitive_size {
+ float constant;
+ uintptr_t pointer;
+};
+
+struct bifrost_vertex_only {
+ u32 unk2; /* =0x2 */
+
+ u32 zero0;
+
+ u64 zero1;
+} __attribute__((packed));
+
+struct bifrost_tiler_heap_meta {
+ u32 zero;
+ u32 heap_size;
+ /* note: these are just guesses! */
+ mali_ptr tiler_heap_start;
+ mali_ptr tiler_heap_free;
+ mali_ptr tiler_heap_end;
+
+ /* hierarchy weights? but they're still 0 after the job has run... */
+ u32 zeros[12];
+} __attribute__((packed));
+
+struct bifrost_tiler_meta {
+ u64 zero0;
+ u32 unk; // = 0xf0
+ u16 width;
+ u16 height;
+ u64 zero1;
+ mali_ptr tiler_heap_meta;
+ /* TODO what is this used for? */
+ u64 zeros[20];
+} __attribute__((packed));
+
+struct bifrost_tiler_only {
+ /* 0x20 */
+ union midgard_primitive_size primitive_size;
+
+ mali_ptr tiler_meta;
+
+ u64 zero1, zero2, zero3, zero4, zero5, zero6;
+
+ u32 gl_enables;
+ u32 zero7;
+ u64 zero8;
+} __attribute__((packed));
+
+struct bifrost_scratchpad {
+ u32 zero;
+ u32 flags; // = 0x1f
+ /* This is a pointer to a CPU-inaccessible buffer, 16 pages, allocated
+ * during startup. It seems to serve the same purpose as the
+ * gpu_scratchpad in the SFBD for Midgard, although it's slightly
+ * larger.
+ */
+ mali_ptr gpu_scratchpad;
+} __attribute__((packed));
+
+struct mali_vertex_tiler_postfix {
+ /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
+ * output from the vertex shader for tiler jobs.
+ */
+
+ uintptr_t position_varying;
+
+ /* An array of mali_uniform_buffer_meta's. The size is given by the
+ * shader_meta.
+ */
+ uintptr_t uniform_buffers;
+
+ /* This is a pointer to an array of pointers to the texture
+ * descriptors, number of pointers bounded by number of textures. The
+ * indirection is needed to accommodate varying numbers and sizes of
+ * texture descriptors */
+ uintptr_t texture_trampoline;
+
+ /* For OpenGL, from what I've seen, this is intimately connected to
+ * texture_meta. cwabbott says this is not the case under Vulkan, hence
+ * why this field is separate (Midgard is Vulkan capable). Pointer to
+ * array of sampler descriptors (which are uniform in size) */
+ uintptr_t sampler_descriptor;
+
+ uintptr_t uniforms;
+ u8 flags : 4;
+ uintptr_t _shader_upper : MALI_SHORT_PTR_BITS - 4; /* struct shader_meta */
+ uintptr_t attributes; /* struct attribute_buffer[] */
+ uintptr_t attribute_meta; /* attribute_meta[] */
+ uintptr_t varyings; /* struct attr */
+ uintptr_t varying_meta; /* pointer */
+ uintptr_t viewport;
+ uintptr_t occlusion_counter; /* A single bit as far as I can tell */
+
+ /* Note: on Bifrost, this isn't actually the FBD. It points to
+ * bifrost_scratchpad instead. However, it does point to the same thing
+ * in vertex and tiler jobs.
+ */
+ mali_ptr framebuffer;
+
+#ifdef __LP64__
+#ifdef BIFROST
+ /* most likely padding to make this a multiple of 64 bytes */
+ u64 zero7;
+#endif
+#endif
+} __attribute__((packed));
+
+struct midgard_payload_vertex_tiler {
+#ifndef __LP64__
+ union midgard_primitive_size primitive_size;
+#endif
+
+ struct mali_vertex_tiler_prefix prefix;
+
+#ifndef __LP64__
+ u32 zero3;
+#endif
+
+ u32 gl_enables; // 0x5
+
+ /* Offset for first vertex in buffer */
+ u32 draw_start;
+
+ uintptr_t zero5;
+
+ struct mali_vertex_tiler_postfix postfix;
+
+#ifdef __LP64__
+ union midgard_primitive_size primitive_size;
+#endif
+} __attribute__((packed));
+
+struct bifrost_payload_vertex {
+ struct mali_vertex_tiler_prefix prefix;
+ struct bifrost_vertex_only vertex;
+ struct mali_vertex_tiler_postfix postfix;
+} __attribute__((packed));
+
+struct bifrost_payload_tiler {
+ struct mali_vertex_tiler_prefix prefix;
+ struct bifrost_tiler_only tiler;
+ struct mali_vertex_tiler_postfix postfix;
+} __attribute__((packed));
+
+struct bifrost_payload_fused {
+ struct mali_vertex_tiler_prefix prefix;
+ struct bifrost_tiler_only tiler;
+ struct mali_vertex_tiler_postfix tiler_postfix;
+ struct bifrost_vertex_only vertex;
+ struct mali_vertex_tiler_postfix vertex_postfix;
+} __attribute__((packed));
+
+/* Pointed to from texture_trampoline, mostly unknown still, haven't
+ * managed to replay successfully */
+
+/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
+ * texture is stored as (63, 63) in these fields. This adjusts for that.
+ * There's an identical pattern in the framebuffer descriptor. Even vertex
+ * count fields work this way, hence the generic name -- integral fields that
+ * are strictly positive generally need this adjustment. */
+
+#define MALI_POSITIVE(dim) (dim - 1)
+
+/* Opposite of MALI_POSITIVE, found in the depth_units field */
+
+#define MALI_NEGATIVE(dim) (dim + 1)
+
+/* Used with wrapping. Incomplete (this is a 4-bit field...) */
+
+enum mali_wrap_mode {
+ MALI_WRAP_REPEAT = 0x8,
+ MALI_WRAP_CLAMP_TO_EDGE = 0x9,
+ MALI_WRAP_CLAMP_TO_BORDER = 0xB,
+ MALI_WRAP_MIRRORED_REPEAT = 0xC
+};
+
+/* 8192x8192 */
+#define MAX_MIP_LEVELS (13)
+
+/* Cubemap bloats everything up */
+#define MAX_FACES (6)
+
+/* Corresponds to the type passed to glTexImage2D and so forth */
+
+struct mali_texture_format {
+ unsigned swizzle : 12;
+ enum mali_format format : 8;
+
+ unsigned usage1 : 3;
+ unsigned is_not_cubemap : 1;
+ unsigned usage2 : 8;
+} __attribute__((packed));
+
+struct mali_texture_descriptor {
+ uint16_t width;
+ uint16_t height;
+ uint16_t depth;
+
+ uint16_t unknown1;
+
+ struct mali_texture_format format;
+
+ uint16_t unknown3;
+
+ /* One for non-mipmapped, zero for mipmapped */
+ uint8_t unknown3A;
+
+ /* Zero for non-mipmapped, (number of levels - 1) for mipmapped */
+ uint8_t nr_mipmap_levels;
+
+ /* Swizzling is a single 32-bit word, broken up here for convenience.
+ * Here, swizzling refers to the ES 3.0 texture parameters for channel
+ * level swizzling, not the internal pixel-level swizzling which is
+ * below OpenGL's reach */
+
+ unsigned swizzle : 12;
+ unsigned swizzle_zero : 20;
+
+ uint32_t unknown5;
+ uint32_t unknown6;
+ uint32_t unknown7;
+
+ mali_ptr swizzled_bitmaps[MAX_MIP_LEVELS * MAX_FACES];
+} __attribute__((packed));
+
+/* Used as part of filter_mode */
+
+#define MALI_LINEAR 0
+#define MALI_NEAREST 1
+#define MALI_MIP_LINEAR (0x18)
+
+/* Used to construct low bits of filter_mode */
+
+#define MALI_TEX_MAG(mode) (((mode) & 1) << 0)
+#define MALI_TEX_MIN(mode) (((mode) & 1) << 1)
+
+#define MALI_TEX_MAG_MASK (1)
+#define MALI_TEX_MIN_MASK (2)
+
+#define MALI_FILTER_NAME(filter) (filter ? "MALI_NEAREST" : "MALI_LINEAR")
+
+/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
+ * be cleaned up a lot. */
+
+#define DECODE_FIXED_16(x) ((float) (x / 256.0))
+
+static inline uint16_t
+FIXED_16(float x)
+{
+ /* Clamp inputs, accounting for float error */
+ float max_lod = (32.0 - (1.0 / 512.0));
+
+ x = ((x > max_lod) ? max_lod : ((x < 0.0) ? 0.0 : x));
+
+ return (int) (x * 256.0);
+}
+
+struct mali_sampler_descriptor {
+ uint32_t filter_mode;
+
+ /* Fixed point. Upper 8-bits is before the decimal point, although it
+ * caps [0-31]. Lower 8-bits is after the decimal point: int(round(x *
+ * 256)) */
+
+ uint16_t min_lod;
+ uint16_t max_lod;
+
+ /* All one word in reality, but packed a bit */
+
+ enum mali_wrap_mode wrap_s : 4;
+ enum mali_wrap_mode wrap_t : 4;
+ enum mali_wrap_mode wrap_r : 4;
+ enum mali_alt_func compare_func : 3;
+
+ /* A single set bit of unknown, ha! */
+ unsigned unknown2 : 1;
+
+ unsigned zero : 16;
+
+ uint32_t zero2;
+ float border_color[4];
+} __attribute__((packed));
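+
+/* Usage sketch (illustrative helper, not observed in the blob): program the
+ * LOD clamp range via the 8.8 fixed-point encoding handled by FIXED_16()
+ * above, e.g. 0.0f .. 4.0f for a texture with five mip levels.
+ */
+static inline void
+mali_sampler_set_lod_range(struct mali_sampler_descriptor *sampler,
+                           float min_lod, float max_lod)
+{
+ sampler->min_lod = FIXED_16(min_lod);
+ sampler->max_lod = FIXED_16(max_lod);
+}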
+
+/* TODO: What are the floats? Apparently always { -inf, -inf, inf, inf },
+ * unless the scissor test is enabled.
+ *
+ * viewport0/viewport1 form the arguments to glViewport. viewport1 is modified
+ * by MALI_POSITIVE; viewport0 is as-is.
+ */
+
+struct mali_viewport {
+ /* XY clipping planes */
+ float clip_minx;
+ float clip_miny;
+ float clip_maxx;
+ float clip_maxy;
+
+ /* Depth clipping planes */
+ float clip_minz;
+ float clip_maxz;
+
+ u16 viewport0[2];
+ u16 viewport1[2];
+} __attribute__((packed));
+
+/* TODO: Varying meta is symmetrical with attr_meta, but there is some
+ * weirdness associated. Figure it out. */
+
+struct mali_unknown6 {
+ u64 unknown0;
+ u64 unknown1;
+};
+
+/* From presentations, 16x16 tiles externally. Use shift for fast computation
+ * of tile numbers. */
+
+#define MALI_TILE_SHIFT 4
+#define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)
+
+/* Tile coordinates are stored as a compact u32, as only 12 bits are needed for
+ * each component. Notice that this provides a theoretical upper bound of (1 <<
+ * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
+ * 65536x65536. Multiplying that together, times another four given that Mali
+ * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
+ * gigabytes of RAM just to store the uncompressed framebuffer itself, let
+ * alone rendering in real-time to such a buffer.
+ *
+ * Nice job, guys.*/
+
+/* From mali_kbase_10969_workaround.c */
+#define MALI_X_COORD_MASK 0x00000FFF
+#define MALI_Y_COORD_MASK 0x0FFF0000
+
+/* Extract parts of a tile coordinate */
+
+#define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
+#define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)
+#define MALI_TILE_COORD_FLAGS(coord) ((coord) & ~(MALI_X_COORD_MASK | MALI_Y_COORD_MASK))
+
+/* No known flags yet, but just in case...? */
+
+#define MALI_TILE_NO_FLAG (0)
+
+/* Helpers to generate tile coordinates based on the boundary coordinates in
+ * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
+ * functions would convert it to the bounding tiles (0, 0) to (7, 7).
+ * Intentional "off-by-one"; finding the tile number is a form of fencepost
+ * problem. */
+
+#define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
+#define MALI_BOUND_TO_TILE(B, bias) ((B - bias) >> MALI_TILE_SHIFT)
+#define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
+#define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
+#define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
+
+struct mali_payload_fragment {
+ u32 min_tile_coord;
+ u32 max_tile_coord;
+ mali_ptr framebuffer;
+} __attribute__((packed));
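+
+/* Usage sketch (illustrative helper): cover a full width x height render
+ * with the tile helpers above. For a 128x128 buffer this gives a min tile of
+ * (0, 0) and a max tile of (7, 7), matching the fencepost note.
+ */
+static inline void
+mali_fragment_set_tile_bounds(struct mali_payload_fragment *frag,
+                              u32 width, u32 height)
+{
+ frag->min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(0, 0);
+ frag->max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(width, height);
+}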
+
+/* (Single?) Framebuffer Descriptor */
+
+/* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
+ * configured for 4x. With MSAA_8, it is configured for 8x. */
+
+#define MALI_FRAMEBUFFER_MSAA_8 (1 << 3)
+#define MALI_FRAMEBUFFER_MSAA_A (1 << 4)
+#define MALI_FRAMEBUFFER_MSAA_B (1 << 23)
+
+/* Fast/slow based on whether all three buffers are cleared at once */
+
+#define MALI_CLEAR_FAST (1 << 18)
+#define MALI_CLEAR_SLOW (1 << 28)
+#define MALI_CLEAR_SLOW_STENCIL (1 << 31)
+
+struct mali_single_framebuffer {
+ u32 unknown1;
+ u32 unknown2;
+ u64 unknown_address_0;
+ u64 zero1;
+ u64 zero0;
+
+ /* Exact format is ironically not known, since EGL is finicky with the
+ * blob. MSAA, colourspace, etc are configured here. */
+
+ u32 format;
+
+ u32 clear_flags;
+ u32 zero2;
+
+ /* Purposeful off-by-one in these fields should be accounted for by the
+ * MALI_DIMENSION macro */
+
+ u16 width;
+ u16 height;
+
+ u32 zero3[8];
+
+ /* By default, the framebuffer is upside down from OpenGL's
+ * perspective. Set framebuffer to the end and negate the stride to
+ * flip in the Y direction */
+
+ mali_ptr framebuffer;
+ int32_t stride;
+
+ u32 zero4;
+
+ /* Depth and stencil buffers are interleaved, it appears, as they are
+ * set to the same address in captures. Both fields set to zero if the
+ * buffer is not being cleared. Depending on GL_ENABLE magic, you might
+ * get a zero enable despite the buffer being present; that still is
+ * disabled. */
+
+ mali_ptr depth_buffer; // not SAME_VA
+ u64 depth_buffer_enable;
+
+ mali_ptr stencil_buffer; // not SAME_VA
+ u64 stencil_buffer_enable;
+
+ u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
+ u32 clear_color_2; // always equal, but unclear function?
+ u32 clear_color_3; // always equal, but unclear function?
+ u32 clear_color_4; // always equal, but unclear function?
+
+ /* Set to zero if not cleared */
+
+ float clear_depth_1; // float32, ditto
+ float clear_depth_2; // float32, ditto
+ float clear_depth_3; // float32, ditto
+ float clear_depth_4; // float32, ditto
+
+ u32 clear_stencil; // Exactly as it appears in OpenGL
+
+ u32 zero6[7];
+
+ /* Very weird format, see generation code in trans_builder.c */
+ u32 resolution_check;
+
+ u32 tiler_flags;
+
+ u64 unknown_address_1; /* Pointing towards... a zero buffer? */
+ u64 unknown_address_2;
+
+ /* See mali_kbase_replay.c */
+ u64 tiler_heap_free;
+ u64 tiler_heap_end;
+
+ /* More below this, maybe */
+} __attribute__((packed));
+
+/* Format bits for the render target flags */
+
+#define MALI_MFBD_FORMAT_AFBC (1 << 5)
+#define MALI_MFBD_FORMAT_MSAA (1 << 7)
+
+struct mali_rt_format {
+ unsigned unk1 : 32;
+ unsigned unk2 : 3;
+
+ unsigned nr_channels : 2; /* MALI_POSITIVE */
+
+ unsigned flags : 11;
+
+ unsigned swizzle : 12;
+
+ unsigned unk4 : 4;
+} __attribute__((packed));
+
+struct bifrost_render_target {
+ struct mali_rt_format format;
+
+ u64 zero1;
+
+ union {
+ struct {
+ /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
+ * there is an extra metadata buffer that contains 16 bytes per tile.
+ * The framebuffer needs to be the same size as before, since we don't
+ * know ahead of time how much space it will take up. The
+ * framebuffer_stride is set to 0, since the data isn't stored linearly
+ * anymore.
+ */
+
+ mali_ptr metadata;
+ u32 stride; // stride in units of tiles
+ u32 unk; // = 0x20000
+ } afbc;
+
+ struct {
+ /* Heck if I know */
+ u64 unk;
+ mali_ptr pointer;
+ } chunknown;
+ };
+
+ mali_ptr framebuffer;
+
+ u32 zero2 : 4;
+ u32 framebuffer_stride : 28; // in units of bytes
+ u32 zero3;
+
+ u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
+ u32 clear_color_2; // always equal, but unclear function?
+ u32 clear_color_3; // always equal, but unclear function?
+ u32 clear_color_4; // always equal, but unclear function?
+} __attribute__((packed));
+
+/* An optional part of bifrost_framebuffer. It comes between the main structure
+ * and the array of render targets. It must be included if any of these are
+ * enabled:
+ *
+ * - Transaction Elimination
+ * - Depth/stencil
+ * - TODO: Anything else?
+ */
+
+/* Flags field: note, these are guesses */
+
+#define MALI_EXTRA_PRESENT (0x400)
+#define MALI_EXTRA_AFBC (0x20)
+#define MALI_EXTRA_AFBC_ZS (0x10)
+#define MALI_EXTRA_ZS (0x4)
+
+struct bifrost_fb_extra {
+ mali_ptr checksum;
+ /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
+ u32 checksum_stride;
+
+ u32 flags;
+
+ union {
+ /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
+ struct {
+ mali_ptr depth_stencil_afbc_metadata;
+ u32 depth_stencil_afbc_stride; // in units of tiles
+ u32 zero1;
+
+ mali_ptr depth_stencil;
+
+ u64 padding;
+ } ds_afbc;
+
+ struct {
+ /* Depth becomes depth/stencil in case of combined D/S */
+ mali_ptr depth;
+ u32 depth_stride_zero : 4;
+ u32 depth_stride : 28;
+ u32 zero1;
+
+ mali_ptr stencil;
+ u32 stencil_stride_zero : 4;
+ u32 stencil_stride : 28;
+ u32 zero2;
+ } ds_linear;
+ };
+
+
+ u64 zero3, zero4;
+} __attribute__((packed));
+
+/* flags for unk3 */
+
+/* Enables writing depth results back to main memory (rather than keeping them
+ * on-chip in the tile buffer and then discarding) */
+
+#define MALI_MFBD_DEPTH_WRITE (1 << 10)
+
+/* The MFBD contains the extra bifrost_fb_extra section */
+
+#define MALI_MFBD_EXTRA (1 << 13)
+
+struct bifrost_framebuffer {
+ u32 unk0; // = 0x10
+
+ u32 unknown2; // = 0x1f, same as SFBD
+ mali_ptr scratchpad;
+
+ /* 0x10 */
+ mali_ptr sample_locations;
+ mali_ptr unknown1;
+ /* 0x20 */
+ u16 width1, height1;
+ u32 zero3;
+ u16 width2, height2;
+ u32 unk1 : 19; // = 0x01000
+ u32 rt_count_1 : 2; // off-by-one (use MALI_POSITIVE)
+ u32 unk2 : 3; // = 0
+ u32 rt_count_2 : 3; // no off-by-one
+ u32 zero4 : 5;
+ /* 0x30 */
+ u32 clear_stencil : 8;
+ u32 unk3 : 24; // = 0x100
+ float clear_depth;
+ mali_ptr tiler_meta;
+ /* 0x40 */
+
+ /* Note: these are guesses! */
+ mali_ptr tiler_scratch_start;
+ mali_ptr tiler_scratch_middle;
+
+ /* These are not, since we see symmetry with replay jobs which name these explicitly */
+ mali_ptr tiler_heap_start;
+ mali_ptr tiler_heap_end;
+
+ u64 zero9, zero10, zero11, zero12;
+
+ /* optional: struct bifrost_fb_extra extra */
+ /* struct bifrost_render_target rts[] */
+} __attribute__((packed));
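+
+/* Illustrative helper (an assumption): the two render target counts above
+ * encode the same number, once off-by-one (MALI_POSITIVE) and once stored
+ * directly.
+ */
+static inline void
+bifrost_framebuffer_set_rt_count(struct bifrost_framebuffer *fb, u32 rt_count)
+{
+ fb->rt_count_1 = MALI_POSITIVE(rt_count);
+ fb->rt_count_2 = rt_count;
+}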
+
+#endif /* __PANFROST_JOB_H__ */
diff --git a/lib/panfrost-misc.h b/lib/panfrost-misc.h
new file mode 100644
index 00000000..82363d87
--- /dev/null
+++ b/lib/panfrost-misc.h
@@ -0,0 +1,47 @@
+/*
+ * © Copyright 2017-2018 The Panfrost Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __PANFROST_MISC_H__
+#define __PANFROST_MISC_H__
+
+#include <inttypes.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+typedef uint64_t mali_ptr;
+
+#define MALI_PTR_FMT "0x%" PRIx64
+
+/* FIXME: put this somewhere more fitting */
+#define MALI_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+
+#endif
diff --git a/lib/tests/igt_audio.c b/lib/tests/igt_audio.c
new file mode 100644
index 00000000..a2d57fb8
--- /dev/null
+++ b/lib/tests/igt_audio.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Author: Simon Ser <simon.ser@intel.com>
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+
+#include "igt_core.h"
+#include "igt_audio.h"
+
+#define SAMPLING_RATE 44100
+#define CHANNELS 1
+#define BUFFER_LEN 2048
+/** PHASESHIFT_LEN: how many samples will be truncated from the signal */
+#define PHASESHIFT_LEN 8
+
+static const int test_freqs[] = { 300, 700, 5000 };
+
+static const size_t test_freqs_len = sizeof(test_freqs) / sizeof(test_freqs[0]);
+
+#define TEST_EXTRA_FREQ 500
+
+static void test_signal_detect_untampered(struct audio_signal *signal)
+{
+ double buf[BUFFER_LEN];
+ bool ok;
+
+ audio_signal_fill(signal, buf, BUFFER_LEN / CHANNELS);
+ ok = audio_signal_detect(signal, SAMPLING_RATE, 0, buf, BUFFER_LEN);
+ igt_assert(ok);
+}
+
+static void test_signal_detect_silence(struct audio_signal *signal)
+{
+ double buf[BUFFER_LEN] = {0};
+ bool ok;
+
+ ok = audio_signal_detect(signal, SAMPLING_RATE, 0, buf, BUFFER_LEN);
+
+ igt_assert(!ok);
+}
+
+static void test_signal_detect_noise(struct audio_signal *signal)
+{
+ double buf[BUFFER_LEN];
+ bool ok;
+ size_t i;
+ long r;
+
+ /* Generate random samples between -1 and 1 */
+ srand(42);
+ for (i = 0; i < BUFFER_LEN; i++) {
+ r = random();
+ buf[i] = (double) r / RAND_MAX * 2 - 1;
+ }
+
+ ok = audio_signal_detect(signal, SAMPLING_RATE, 0, buf, BUFFER_LEN);
+
+ igt_assert(!ok);
+}
+
+static void test_signal_detect_with_missing_freq(struct audio_signal *signal)
+{
+ double buf[BUFFER_LEN];
+ struct audio_signal *missing;
+ bool ok;
+ size_t i;
+
+ /* Generate a signal with all the expected frequencies but the first
+ * one */
+ missing = audio_signal_init(CHANNELS, SAMPLING_RATE);
+ for (i = 1; i < test_freqs_len; i++) {
+ audio_signal_add_frequency(missing, test_freqs[i], 0);
+ }
+ audio_signal_synthesize(missing);
+
+ audio_signal_fill(missing, buf, BUFFER_LEN / CHANNELS);
+ ok = audio_signal_detect(signal, SAMPLING_RATE, 0, buf, BUFFER_LEN);
+ igt_assert(!ok);
+}
+
+static void test_signal_detect_with_unexpected_freq(struct audio_signal *signal)
+{
+ double buf[BUFFER_LEN];
+ struct audio_signal *extra;
+ bool ok;
+ size_t i;
+
+ /* Add an extra, unexpected frequency */
+ extra = audio_signal_init(CHANNELS, SAMPLING_RATE);
+ for (i = 0; i < test_freqs_len; i++) {
+ audio_signal_add_frequency(extra, test_freqs[i], 0);
+ }
+ audio_signal_add_frequency(extra, TEST_EXTRA_FREQ, 0);
+ audio_signal_synthesize(extra);
+
+ audio_signal_fill(extra, buf, BUFFER_LEN / CHANNELS);
+ ok = audio_signal_detect(signal, SAMPLING_RATE, 0, buf, BUFFER_LEN);
+ igt_assert(!ok);
+}
+
+static void test_signal_detect_held_sample(struct audio_signal *signal)
+{
+ double *buf;
+ bool ok;
+ size_t i;
+ double value;
+
+ buf = malloc(BUFFER_LEN * sizeof(double));
+ audio_signal_fill(signal, buf, BUFFER_LEN / CHANNELS);
+
+ /* Repeat a sample in the middle of the signal */
+ value = buf[BUFFER_LEN / 3];
+ for (i = 0; i < 5; i++)
+ buf[BUFFER_LEN / 3 + i] = value;
+
+ ok = audio_signal_detect(signal, SAMPLING_RATE, 0, buf, BUFFER_LEN);
+
+ free(buf);
+
+ igt_assert_f(!ok, "Expected audio signal not to be detected\n");
+}
+
+static void test_signal_detect_phaseshift(struct audio_signal *signal)
+{
+ double *buf;
+ bool ok;
+
+ buf = malloc((BUFFER_LEN + PHASESHIFT_LEN) * sizeof(double));
+ audio_signal_fill(signal, buf, (BUFFER_LEN + PHASESHIFT_LEN) / CHANNELS);
+
+ /* Perform a phaseshift (this isn't related to sirens).
+ *
+ * The idea is to remove a part of the signal in the middle of the
+ * buffer:
+ *
+ * BUFFER_LEN/3 PHASESHIFT_LEN 2*BUFFER_LEN/3
+ * [--------------|################|---------------------------------]
+ *
+ * |
+ * V
+ *
+ * [--------------|---------------------------------]
+ */
+ memmove(&buf[BUFFER_LEN / 3], &buf[BUFFER_LEN / 3 + PHASESHIFT_LEN],
+ (2 * BUFFER_LEN / 3) * sizeof(double));
+
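+	/*
+	 * The detection below only looks at the first BUFFER_LEN samples,
+	 * which now contain the discontinuity introduced above.
+	 */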
+ ok = audio_signal_detect(signal, SAMPLING_RATE, 0, buf, BUFFER_LEN);
+
+ free(buf);
+
+ igt_assert(!ok);
+}
+
+igt_main
+{
+ struct audio_signal *signal;
+ int ret;
+ size_t i;
+
+ igt_subtest_group {
+ igt_fixture {
+ signal = audio_signal_init(CHANNELS, SAMPLING_RATE);
+
+ for (i = 0; i < test_freqs_len; i++) {
+ ret = audio_signal_add_frequency(signal,
+ test_freqs[i],
+ 0);
+ igt_assert(ret == 0);
+ }
+
+ audio_signal_synthesize(signal);
+ }
+
+ igt_subtest("signal-detect-untampered")
+ test_signal_detect_untampered(signal);
+
+ igt_subtest("signal-detect-silence")
+ test_signal_detect_silence(signal);
+
+ igt_subtest("signal-detect-noise")
+ test_signal_detect_noise(signal);
+
+ igt_subtest("signal-detect-with-missing-freq")
+ test_signal_detect_with_missing_freq(signal);
+
+ igt_subtest("signal-detect-with-unexpected-freq")
+ test_signal_detect_with_unexpected_freq(signal);
+
+ igt_subtest("signal-detect-held-sample")
+ test_signal_detect_held_sample(signal);
+
+ igt_subtest("signal-detect-phaseshift")
+ test_signal_detect_phaseshift(signal);
+
+ igt_fixture {
+ audio_signal_fini(signal);
+ }
+ }
+}
diff --git a/lib/tests/igt_conflicting_args.c b/lib/tests/igt_conflicting_args.c
new file mode 100644
index 00000000..f600abd4
--- /dev/null
+++ b/lib/tests/igt_conflicting_args.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * DESCRIPTION: Make sure that the IGT framework complains when a test tries to
+ * define conflicting options.
+ */
+
+#include <signal.h>
+#include <sys/wait.h>
+#include <errno.h>
+
+#include "drmtest.h"
+#include "igt_core.h"
+#include "igt_tests_common.h"
+
+static struct option long_options[2];
+static const char *short_options;
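+/* Note: long_options[1] stays zeroed and acts as getopt_long()'s terminator. */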
+
+static int opt_handler(int option, int option_index, void *input)
+{
+ return 0;
+}
+
+static int do_fork(void)
+{
+ char test_name[] = "test";
+
+ char *argv[] = { test_name };
+ int argc = ARRAY_SIZE(argv);
+
+ pid_t pid = fork();
+ internal_assert(pid != -1);
+
+ if (pid) {
+ int status;
+ while (waitpid(pid, &status, 0) == -1 && errno == EINTR)
+ ;
+
+ return status;
+ }
+
+ igt_subtest_init_parse_opts(&argc, argv, short_options, long_options,
+ "", opt_handler, NULL);
+ igt_subtest("dummy") {}
+ igt_exit();
+}
+
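+/*
+ * Each case below forks so that the expected SIGABRT on conflicting options
+ * only kills the child; main() then asserts on the wait status returned by
+ * do_fork().
+ */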
+int main(int argc, char **argv)
+{
+ /* no conflict */
+ long_options[0] = (struct option) { "iterations", required_argument, NULL, 'i'};
+ short_options = "";
+ internal_assert_wexited(do_fork(), 0);
+
+ /* conflict on short option */
+ long_options[0] = (struct option) { "iterations", required_argument, NULL, 'i'};
+ short_options = "h";
+ internal_assert_wsignaled(do_fork(), SIGABRT);
+
+ /* conflict on long option name */
+ long_options[0] = (struct option) { "help", required_argument, NULL, 'i'};
+ short_options = "";
+ internal_assert_wsignaled(do_fork(), SIGABRT);
+
+ /* conflict on long option 'val' representation vs short option */
+ long_options[0] = (struct option) { "iterations", required_argument, NULL, 'h'};
+ short_options = "";
+ internal_assert_wsignaled(do_fork(), SIGABRT);
+
+ /* conflict on long option 'val' representations */
+ long_options[0] = (struct option) { "iterations", required_argument, NULL, 500};
+ short_options = "";
+ internal_assert_wsignaled(do_fork(), SIGABRT);
+
+ return 0;
+}
diff --git a/lib/tests/igt_edid.c b/lib/tests/igt_edid.c
new file mode 100644
index 00000000..a847df27
--- /dev/null
+++ b/lib/tests/igt_edid.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors: Simon Ser <simon.ser@intel.com>
+ */
+
+#include "config.h"
+
+#include <stdbool.h>
+
+#include "igt_core.h"
+#include "igt_kms.h"
+#include "igt_edid.h"
+
+static const unsigned char edid_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/**
+ * Sanity check the header of the base EDID block.
+ */
+static bool edid_header_is_valid(const unsigned char *raw_edid)
+{
+ size_t i;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] != edid_header[i])
+ return false;
+
+ return true;
+}
+
+/**
+ * Sanity check the checksum of the EDID block.
+ */
+static bool edid_block_checksum(const unsigned char *raw_edid)
+{
+ size_t i;
+ unsigned char csum = 0;
+
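+	/* Each 128-byte EDID block is valid iff its bytes sum to 0 modulo 256. */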
+ for (i = 0; i < EDID_LENGTH; i++) {
+ csum += raw_edid[i];
+ }
+
+ return csum == 0;
+}
+
+typedef const unsigned char *(*get_edid_func)(void);
+
+igt_simple_main
+{
+ const struct {
+ const char *desc;
+ get_edid_func f;
+ size_t exts;
+ } funcs[] = {
+ { "base", igt_kms_get_base_edid, 0 },
+ { "alt", igt_kms_get_alt_edid, 0 },
+ { "hdmi_audio", igt_kms_get_hdmi_audio_edid, 1 },
+ {0},
+ }, *f;
+ const unsigned char *edid;
+ size_t i;
+
+ for (f = funcs; f->f; f++) {
+ edid = f->f();
+
+ igt_assert_f(edid_header_is_valid(edid),
+ "invalid header on %s EDID", f->desc);
+ /* check base edid block */
+ igt_assert_f(edid_block_checksum(edid),
+ "checksum failed on %s EDID", f->desc);
+ /* check extension blocks, if any */
+ for (i = 0; i < f->exts; i++)
+ igt_assert_f(edid_block_checksum(edid + (i + 1) * EDID_LENGTH),
+ "CEA block checksum failed on %s EDID", f->desc);
+ }
+}
diff --git a/lib/tests/igt_hdmi_inject.c b/lib/tests/igt_hdmi_inject.c
index 9b6780a1..2534b1a2 100644
--- a/lib/tests/igt_hdmi_inject.c
+++ b/lib/tests/igt_hdmi_inject.c
@@ -73,7 +73,6 @@ igt_simple_main
} funcs[] = {
{ "3D", kmstest_edid_add_3d },
{ "4k", kmstest_edid_add_4k },
- { "audio", kmstest_edid_add_audio },
{ NULL, NULL },
}, *f;
diff --git a/lib/tests/igt_subtest_group.c b/lib/tests/igt_subtest_group.c
index 7783d021..973a30e4 100644
--- a/lib/tests/igt_subtest_group.c
+++ b/lib/tests/igt_subtest_group.c
@@ -28,8 +28,13 @@
igt_main
{
- bool t1 = false;
- int t2 = 0;
+	/*
+	 * Local variables have to be volatile here; otherwise their values end
+	 * up undefined when modified in igt_subtest, igt_fixture, etc. because
+	 * of the longjmps the framework performs.
+	 */
+ volatile bool t1 = false;
+ volatile int t2 = 0;
igt_subtest_group {
igt_fixture {
diff --git a/lib/tests/meson.build b/lib/tests/meson.build
index 74efce39..b930ee6e 100644
--- a/lib/tests/meson.build
+++ b/lib/tests/meson.build
@@ -2,6 +2,8 @@ lib_tests = [
'igt_assert',
'igt_can_fail',
'igt_can_fail_simple',
+ 'igt_conflicting_args',
+ 'igt_edid',
'igt_exit_handler',
'igt_fork',
'igt_fork_helper',
@@ -21,6 +23,13 @@ lib_fail_tests = [
'igt_timeout',
]
+lib_tests_deps = igt_deps
+
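+# igt_audio exercises lib/igt_audio.c, which relies on the extra libraries
+# (gsl and friends) that the chamelium dependency pulls in, so only build it
+# when chamelium support is available.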
+if chamelium.found()
+	lib_tests_deps += chamelium
+ lib_tests += 'igt_audio'
+endif
+
foreach lib_test : lib_tests
exec = executable(lib_test, lib_test + '.c', install : false,
dependencies : igt_deps)
diff --git a/man/meson.build b/man/meson.build
index a6b08900..2c1396af 100644
--- a/man/meson.build
+++ b/man/meson.build
@@ -1,3 +1,5 @@
+build_man = get_option('build_man')
+
manpages = [
'intel_aubdump',
'intel_audio_dump',
@@ -22,10 +24,10 @@ defs_rst = configure_file(input : 'defs.rst.in',
output : 'defs.rst',
configuration : config)
-rst2man = find_program('rst2man-3', 'rst2man', required : _man_required)
+rst2man = find_program('rst2man-3', 'rst2man', required : build_man)
rst2man_script = find_program('rst2man.sh')
-if _build_man and rst2man.found()
+if rst2man.found()
foreach manpage : manpages
custom_target(manpage + '.1',
build_by_default : true,
@@ -36,10 +38,6 @@ if _build_man and rst2man.found()
install : true,
install_dir : join_paths(mandir, 'man1'))
endforeach
- build_info += 'Build man pages: Yes'
-else
- if _man_required
- error('Cannot build man pages due to missing dependencies')
- endif
- build_info += 'Build man pages: No'
endif
+
+build_info += 'Build man pages: @0@'.format(rst2man.found())
diff --git a/meson.build b/meson.build
index be6dff9d..b1028ee6 100644
--- a/meson.build
+++ b/meson.build
@@ -1,14 +1,25 @@
project('igt-gpu-tools', 'c',
- version : '1.23',
+ version : '1.24',
default_options: [
'warning_level=2',
'c_std=gnu11',
+ 'b_ndebug=false',
+ 'buildtype=debugoptimized',
],
license : 'MIT',
- meson_version : '>=0.46.0')
+ meson_version : '>=0.47.0')
+
+if get_option('b_ndebug') != 'false'
+ error('Building without -Db_ndebug=false is not supported')
+endif
cc = meson.get_compiler('c')
+# Also make sure that the user doesn't have -DNDEBUG defined in their config
+if not cc.compiles(files('lib/check-ndebug.h'), args: get_option('c_args'))
+ error('Building with NDEBUG defined is not supported')
+endif
+
cc_args = [
'-Wbad-function-cast',
'-Wdeclaration-after-statement',
@@ -52,6 +63,12 @@ cc_args = [
'-Werror=sequence-point',
'-Werror=trigraphs',
'-Werror=write-strings',
+# Disable the memory-allocating builtins as they may cause unexpected behavior
+# with our framework. They *may* get optimized out in favor of a register or
+# stack variable, making them effectively local. Local variables do not play
+# well with longjmp, which is heavily used by the IGT framework.
+ '-fno-builtin-malloc',
+ '-fno-builtin-calloc',
]
foreach cc_arg : cc_args
@@ -60,44 +77,12 @@ foreach cc_arg : cc_args
endif
endforeach
-_build_overlay = false
-_overlay_required = false
-_build_man = false
-_man_required = false
-_build_chamelium = false
-_chamelium_required = false
-_build_docs = false
-_docs_required = false
-_build_tests = false
-_tests_required = false
-_build_runner = false
-_runner_required = false
-
-build_overlay = get_option('build_overlay')
-overlay_backends = get_option('overlay_backends')
-build_man = get_option('build_man')
-with_valgrind = get_option('with_valgrind')
build_chamelium = get_option('build_chamelium')
build_docs = get_option('build_docs')
-build_tests = get_option('build_tests')
+build_tests = not get_option('build_tests').disabled()
with_libdrm = get_option('with_libdrm')
-with_libunwind = get_option('with_libunwind')
-build_runner = get_option('build_runner')
-
-_build_overlay = build_overlay != 'false'
-_overlay_required = build_overlay == 'true'
-_build_man = build_man != 'false'
-_man_required = build_man == 'true'
-_build_chamelium = build_chamelium != 'false'
-_chamelium_required = build_chamelium == 'true'
-_build_docs = build_docs != 'false'
-_docs_required = build_docs == 'true'
-_build_tests = build_tests != 'false'
-_tests_required = build_tests == 'true'
-_build_runner = build_runner != 'false'
-_runner_required = build_runner == 'true'
-
-build_info = []
+
+build_info = ['Build type: ' + get_option('buildtype')]
inc = include_directories('include/drm-uapi', 'lib', 'lib/stubs/syscalls', '.')
@@ -133,29 +118,17 @@ pciaccess = dependency('pciaccess', version : '>=0.10')
libkmod = dependency('libkmod')
libprocps = dependency('libprocps', required : true)
-libunwind = null_dep
-libunwindinfo = 'No'
-if with_libunwind != 'false'
- libunwind = dependency('libunwind', required : with_libunwind == 'true')
- if libunwind.found()
- libunwindinfo = 'Yes'
- endif
-endif
-build_info += 'With libunwind: ' + libunwindinfo
+libunwind = dependency('libunwind', required : get_option('with_libunwind'))
+build_info += 'With libunwind: @0@'.format(libunwind.found())
libdw = dependency('libdw', required : true)
pixman = dependency('pixman-1', required : true)
-valgrind = null_dep
-valgrindinfo = 'No'
-if with_valgrind != 'false'
- valgrind = dependency('valgrind', required : with_valgrind == 'true')
- if valgrind.found()
- config.set('HAVE_VALGRIND', 1)
- valgrindinfo = 'Yes'
- endif
+valgrind = dependency('valgrind', required : get_option('with_valgrind'))
+if valgrind.found()
+ config.set('HAVE_VALGRIND', 1)
endif
-build_info += 'Valgrind annotations: ' + valgrindinfo
+build_info += 'Valgrind annotations: @0@'.format(valgrind.found())
cairo = dependency('cairo', version : '>1.12.0', required : true)
libudev = dependency('libudev', required : true)
@@ -178,28 +151,28 @@ if not xmlrpc.found() and xmlrpc_cmd.found()
endif
endif
-gsl = null_dep
-alsa = null_dep
-chamelium = null_dep
-chamelium_found = false # TODO: use a disabler object instead
-chameliuminfo = 'No'
-if _build_chamelium
- gsl = dependency('gsl', required : _chamelium_required)
- alsa = dependency('alsa', required : _chamelium_required)
+if build_chamelium.enabled() and not (xmlrpc.found() and xmlrpc_util.found() and xmlrpc_client.found())
+ error('Chamelium build forced and required dependency xmlrpc not found')
+endif
+
+gsl = dependency('gsl', required : build_chamelium)
+alsa = dependency('alsa', required : build_chamelium)
+libcurl = dependency('libcurl', required : build_chamelium)
+
+if xmlrpc.found() and xmlrpc_util.found() and xmlrpc_client.found() and gsl.found() and alsa.found() and libcurl.found()
+ config.set('HAVE_CHAMELIUM', 1)
chamelium = declare_dependency(dependencies : [
xmlrpc,
xmlrpc_util,
xmlrpc_client,
gsl,
alsa,
- ], required : _chamelium_required)
- if xmlrpc.found() and xmlrpc_util.found() and xmlrpc_client.found() and gsl.found() and alsa.found()
- config.set('HAVE_CHAMELIUM', 1)
- chameliuminfo = 'Yes'
- chamelium_found = true
- endif
+ ])
+else
+ chamelium = disabler()
endif
-build_info += 'Build Chamelium test: ' + chameliuminfo
+
+build_info += 'Build Chamelium test: @0@'.format(chamelium.found())
pthreads = dependency('threads')
math = cc.find_library('m')
@@ -223,6 +196,9 @@ if cc.has_header('cpuid.h')
# FIXME: Do we need the example link test from configure.ac?
config.set('HAVE_CPUID_H', 1)
endif
+if cc.has_header_symbol('unistd.h', 'gettid', args : '-D_GNU_SOURCE')
+ config.set('HAVE_GETTID', 1)
+endif
if cc.has_member('struct sysinfo', 'totalram',
prefix : '#include <sys/sysinfo.h>')
@@ -235,6 +211,13 @@ config.set10('HAVE_MEMFD_CREATE', have)
add_project_arguments('-D_GNU_SOURCE', language : 'c')
add_project_arguments('-include', 'config.h', language : 'c')
+# _FORTIFY_SOURCE, see FEATURE_TEST_MACROS(7): performs lightweight overflow
+# checks on quite a few libc functions; it requires -O optimizations, hence
+# the buildtype check below.
+if ['debugoptimized', 'release', 'minsize'].contains(get_option('buildtype'))
+ add_project_arguments('-D_FORTIFY_SOURCE=2', language : 'c')
+endif
+
config.set('PACKAGE_NAME', meson.project_name())
config.set_quoted('PACKAGE_VERSION', meson.project_version())
config.set_quoted('PACKAGE', meson.project_name())
@@ -291,12 +274,11 @@ else
endif
subdir('lib')
-if _build_tests
+if build_tests
subdir('tests')
- build_info += 'Build tests: Yes'
-else
- build_info += 'Build tests: No'
endif
+build_info += 'Build tests: @0@'.format(build_tests)
+
subdir('benchmarks')
subdir('tools')
subdir('runner')
@@ -306,18 +288,13 @@ endif
subdir('overlay')
subdir('man')
-gtk_doc = dependency('gtk-doc', required : _docs_required)
-
-docs_info = 'No'
-if _build_docs
- if _build_tests and gtk_doc.found()
- subdir('docs')
- docs_info = 'Yes'
- elif _docs_required
- error('Documentation requires building tests')
- endif
+gtk_doc = dependency('gtk-doc', required : build_docs)
+if build_tests and gtk_doc.found()
+ subdir('docs')
+elif build_docs.enabled()
+ error('Documentation requires building tests')
endif
-build_info += 'Build documentation: ' + docs_info
+build_info += 'Build documentation: @0@'.format(build_tests and gtk_doc.found())
message('Build options')
message('=============')
diff --git a/meson_options.txt b/meson_options.txt
index 888efe56..9cca0c4f 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -1,7 +1,5 @@
option('build_overlay',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Build overlay')
option('overlay_backends',
@@ -11,33 +9,23 @@ option('overlay_backends',
description : 'Overlay backends to enable')
option('build_chamelium',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Build chamelium test')
option('with_valgrind',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Build with support for valgrind annotations')
option('build_man',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Build man pages')
option('build_docs',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Build documentation')
option('build_tests',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Build tests')
option('with_libdrm',
@@ -47,15 +35,11 @@ option('with_libdrm',
description : 'libdrm libraries to be used')
option('with_libunwind',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Use libunwind')
option('build_runner',
- type : 'combo',
- value : 'auto',
- choices : ['auto', 'true', 'false'],
+ type : 'feature',
description : 'Build test runner')
option('use_rpath',
diff --git a/overlay/meson.build b/overlay/meson.build
index 46d2d494..d133b6be 100644
--- a/overlay/meson.build
+++ b/overlay/meson.build
@@ -1,3 +1,6 @@
+build_overlay = get_option('build_overlay')
+overlay_backends = get_option('overlay_backends')
+
gpu_overlay_src = [
'chart.c',
'config.c',
@@ -18,7 +21,7 @@ xv_backend_required = false
xlib_backend_required = false
build_xv_backend = overlay_backends.contains('xv') or overlay_backends.contains('auto')
build_xlib_backend = overlay_backends.contains('x') or overlay_backends.contains('auto')
-if _overlay_required
+if build_overlay.enabled()
xv_backend_required = overlay_backends.contains('xv')
xlib_backend_required = overlay_backends.contains('x')
endif
@@ -30,7 +33,7 @@ dri2proto = dependency('dri2proto',
version : '>= 2.6',
required : xv_backend_required or xlib_backend_required)
cairo_xlib = dependency('cairo-xlib', required : xlib_backend_required)
-xrandr = dependency('xrandr', version : '>=1.3', required : _overlay_required)
+xrandr = dependency('xrandr', version : '>=1.3', required : build_overlay)
gpu_overlay_deps = [ realtime, math, cairo, pciaccess, libdrm,
libdrm_intel, lib_igt_perf ]
@@ -70,7 +73,7 @@ gpu_overlay_src += both_x11_src
gpu_overlay_src += 'kms/kms-overlay.c'
-leg = find_program('leg', required : _overlay_required)
+leg = find_program('leg', required : build_overlay)
if leg.found()
leg_file = custom_target('tracepoint_format',
output: 'tracepoint_format.h',
@@ -81,17 +84,17 @@ else
message('WARNING: leg command not found, disabling overlay; try : apt-get install peg')
endif
-if _build_overlay and ['x86', 'x86_64'].contains(host_machine.cpu_family()) and libdrm_intel.found() and leg.found() and xrandr.found() and cairo.found() and (with_xlib_backend or with_xv_backend)
+if not build_overlay.disabled() and ['x86', 'x86_64'].contains(host_machine.cpu_family()) and libdrm_intel.found() and leg.found() and xrandr.found() and cairo.found() and (with_xlib_backend or with_xv_backend)
executable('intel-gpu-overlay', gpu_overlay_src,
include_directories : inc,
c_args : gpu_overlay_cflags,
dependencies : gpu_overlay_deps,
install : true)
- build_info += 'Build overlay: Yes'
+ build_info += 'Build overlay: true'
build_info += 'Overlay backends: ' + ','.join(backends_strings)
else
- if _overlay_required
+ if build_overlay.enabled()
error('Cannot build overlay due to missing dependencies')
endif
- build_info += 'Build overlay: No'
+ build_info += 'Build overlay: false'
endif
diff --git a/runner/job_list.c b/runner/job_list.c
index 941e2ee0..5283fd72 100644
--- a/runner/job_list.c
+++ b/runner/job_list.c
@@ -17,10 +17,8 @@ static bool matches_any(const char *str, struct regex_list *list)
size_t i;
for (i = 0; i < list->size; i++) {
- if (regexec(list->regexes[i], str,
- (size_t)0, NULL, 0) == 0) {
+ if (g_regex_match(list->regexes[i], str, 0, NULL))
return true;
- }
}
return false;
@@ -113,11 +111,17 @@ static void add_subtests(struct job_list *job_list, struct settings *settings,
fprintf(stderr, "popen error when executing %s: %s\n", binary, strerror(errno));
} else if (WIFEXITED(s)) {
if (WEXITSTATUS(s) == IGT_EXIT_INVALID) {
+ char piglitname[256];
+
+ generate_piglit_name(binary, NULL,
+ piglitname, sizeof(piglitname));
/* No subtests on this one */
- if (exclude && exclude->size && matches_any(binary, exclude)) {
+ if (exclude && exclude->size &&
+ matches_any(piglitname, exclude)) {
return;
}
- if (!include || !include->size || matches_any(binary, include)) {
+ if (!include || !include->size ||
+ matches_any(piglitname, include)) {
add_job_list_entry(job_list, strdup(binary), NULL, 0);
return;
}
@@ -293,6 +297,30 @@ static bool job_list_from_test_list(struct job_list *job_list,
return any;
}
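+
+/*
+ * Print the piglit-style name (igt@<binary>[@<subtest>]) of every entry in
+ * the job list, one name per line.
+ */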
+void list_all_tests(struct job_list *lst)
+{
+ char piglit_name[256];
+
+ for (size_t test_idx = 0; test_idx < lst->size; ++test_idx) {
+ struct job_list_entry *current_entry = lst->entries + test_idx;
+ char *binary = current_entry->binary;
+
+ if (current_entry->subtest_count == 0) {
+ generate_piglit_name(binary, NULL,
+ piglit_name, sizeof(piglit_name));
+ printf("%s\n", piglit_name);
+ continue;
+ }
+ for (size_t subtest_idx = 0;
+ subtest_idx < current_entry->subtest_count;
+ ++subtest_idx) {
+ generate_piglit_name(binary, current_entry->subtests[subtest_idx],
+ piglit_name, sizeof(piglit_name));
+ printf("%s\n", piglit_name);
+ }
+ }
+}
+
static char *lowercase(const char *str)
{
char *ret = malloc(strlen(str) + 1);
diff --git a/runner/job_list.h b/runner/job_list.h
index f8bbbddc..39c9b863 100644
--- a/runner/job_list.h
+++ b/runner/job_list.h
@@ -36,5 +36,6 @@ bool create_job_list(struct job_list *job_list, struct settings *settings);
bool serialize_job_list(struct job_list *job_list, struct settings *settings);
bool read_job_list(struct job_list *job_list, int dirfd);
+void list_all_tests(struct job_list *lst);
#endif
diff --git a/runner/meson.build b/runner/meson.build
index de6e6f1c..4eff193a 100644
--- a/runner/meson.build
+++ b/runner/meson.build
@@ -1,4 +1,4 @@
-jsonc = dependency('json-c', required: _runner_required)
+build_runner = get_option('build_runner')
runnerlib_sources = [ 'settings.c',
'job_list.c',
@@ -12,12 +12,18 @@ results_sources = [ 'results.c' ]
runner_test_sources = [ 'runner_tests.c' ]
runner_json_test_sources = [ 'runner_json_tests.c' ]
-if _build_runner and jsonc.found()
+jsonc = dependency('json-c', required: build_runner)
+
+if not build_tests and jsonc.found()
+ error('Building test runner requires building tests')
+endif
+
+if jsonc.found()
subdir('testdata')
runnerlib = static_library('igt_runner', runnerlib_sources,
include_directories : inc,
- dependencies : jsonc)
+ dependencies : [jsonc, glib])
runner = executable('igt_runner', runner_sources,
link_with : runnerlib,
@@ -54,7 +60,7 @@ if _build_runner and jsonc.found()
dependencies : [igt_deps, jsonc])
test('runner_json', runner_json_test)
- build_info += 'Build test runner: Yes'
+ build_info += 'Build test runner: true'
else
- build_info += 'Build test runner: No'
+ build_info += 'Build test runner: false'
endif
diff --git a/runner/resultgen.c b/runner/resultgen.c
index d9702a19..7b4cd519 100644
--- a/runner/resultgen.c
+++ b/runner/resultgen.c
@@ -499,14 +499,17 @@ static const char igt_dmesg_whitelist[] =
static const char igt_piglit_style_dmesg_blacklist[] =
"(\\[drm:|drm_|intel_|i915_)";
-static bool init_regex_whitelist(struct settings* settings, regex_t* re)
+static bool init_regex_whitelist(struct settings* settings, GRegex **re)
{
+ GError *err = NULL;
const char *regex = settings->piglit_style_dmesg ?
igt_piglit_style_dmesg_blacklist :
igt_dmesg_whitelist;
- if (regcomp(re, regex, REG_EXTENDED | REG_NOSUB) != 0) {
+ *re = g_regex_new(regex, G_REGEX_OPTIMIZE, 0, &err);
+ if (err) {
fprintf(stderr, "Cannot compile dmesg regexp\n");
+ g_error_free(err);
return false;
}
@@ -630,7 +633,7 @@ static bool fill_from_dmesg(int fd,
char piglit_name[256];
ssize_t read;
size_t i;
- regex_t re;
+ GRegex *re;
if (!f) {
return false;
@@ -671,12 +674,12 @@ static bool fill_from_dmesg(int fd,
if (settings->piglit_style_dmesg) {
if ((flags & 0x07) <= settings->dmesg_warn_level && continuation != 'c' &&
- regexec(&re, message, (size_t)0, NULL, 0) != REG_NOMATCH) {
+ g_regex_match(re, message, 0, NULL)) {
append_line(&warnings, &warningslen, formatted);
}
} else {
if ((flags & 0x07) <= settings->dmesg_warn_level && continuation != 'c' &&
- regexec(&re, message, (size_t)0, NULL, 0) == REG_NOMATCH) {
+ !g_regex_match(re, message, 0, NULL)) {
append_line(&warnings, &warningslen, formatted);
}
}
@@ -715,7 +718,7 @@ static bool fill_from_dmesg(int fd,
free(dmesg);
free(warnings);
- regfree(&re);
+ g_regex_unref(re);
fclose(f);
return true;
}
@@ -739,6 +742,7 @@ static const char *result_from_exitcode(int exitcode)
static void add_subtest(struct subtests *subtests, char *subtest)
{
size_t len = strlen(subtest);
+ size_t i;
if (len == 0)
return;
@@ -746,6 +750,11 @@ static void add_subtest(struct subtests *subtests, char *subtest)
if (subtest[len - 1] == '\n')
subtest[len - 1] = '\0';
+ /* Don't add if we already have this subtest */
+ for (i = 0; i < subtests->size; i++)
+ if (!strcmp(subtest, subtests->names[i]))
+ return;
+
subtests->size++;
subtests->names = realloc(subtests->names, sizeof(*subtests->names) * subtests->size);
subtests->names[subtests->size - 1] = subtest;
diff --git a/runner/runner.c b/runner/runner.c
index e1a6ccba..4855ad64 100644
--- a/runner/runner.c
+++ b/runner/runner.c
@@ -24,6 +24,11 @@ int main(int argc, char **argv)
return 1;
}
+ if (settings.list_all) {
+ list_all_tests(&job_list);
+ return 0;
+ }
+
if (!initialize_execute_state(&state, &settings, &job_list)) {
return 1;
}
diff --git a/runner/runner_tests.c b/runner/runner_tests.c
index ade78b18..39d4a078 100644
--- a/runner/runner_tests.c
+++ b/runner/runner_tests.c
@@ -10,7 +10,15 @@
#include "job_list.h"
#include "executor.h"
-static char testdatadir[] = TESTDATA_DIRECTORY;
+/*
+ * NOTE: this test uses a lot of variables that are changed in igt_fixture,
+ * igt_subtest_group and igt_subtest blocks but defined outside of them.
+ *
+ * Such variables have to be either non-local or volatile, otherwise their
+ * contents are undefined due to the longjmps the framework performs.
+ */
+
+static const char testdatadir[] = TESTDATA_DIRECTORY;
static void igt_assert_eqstr(const char *one, const char *two)
{
@@ -65,10 +73,10 @@ static void job_list_filter_test(const char *name, const char *filterarg1, const
size_t expected_normal, size_t expected_multiple)
{
int multiple;
- struct settings settings;
+ struct settings *settings = malloc(sizeof(*settings));
igt_fixture
- init_settings(&settings);
+ init_settings(settings);
for (multiple = 0; multiple < 2; multiple++) {
igt_subtest_f("job-list-filters-%s-%s", name, multiple ? "multiple" : "normal") {
@@ -84,9 +92,9 @@ static void job_list_filter_test(const char *name, const char *filterarg1, const
size_t size;
init_job_list(&list);
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- success = create_job_list(&list, &settings);
+ success = create_job_list(&list, settings);
size = list.size;
if (success)
@@ -99,8 +107,10 @@ static void job_list_filter_test(const char *name, const char *filterarg1, const
}
}
- igt_fixture
- free_settings(&settings);
+ igt_fixture {
+ free_settings(settings);
+ free(settings);
+ }
}
static void clear_directory_fd(int dirfd)
@@ -196,10 +206,29 @@ static void assert_execution_results_exist(int dirfd)
igt_main
{
- struct settings settings;
+ struct settings *settings = malloc(sizeof(*settings));
- igt_fixture
- init_settings(&settings);
+ igt_fixture {
+ int i;
+
+ /*
+ * Let's close all the non-standard fds ahead of executing
+ * anything, so we can test for descriptor leakage caused by
+ * any of the igt_runner code-paths exercised here.
+ *
+ * See file-descriptor-leakage subtest at the end.
+ *
+		 * Some libraries (looking at you, GnuTLS) may leave fds open
+		 * after their implicitly called library constructor. We don't
+ * have full control over them as they may be dependencies of
+ * our dependencies and may get pulled in if the user's and
+ * distribution's compile/configure/USE are just right.
+ */
+ for (i = 3; i < 400; i++)
+ close(i);
+
+ init_settings(settings);
+ }
igt_subtest("default-settings") {
const char *argv[] = { "runner",
@@ -207,26 +236,26 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
-
- igt_assert_eq(settings.abort_mask, 0);
- igt_assert(!settings.test_list);
- igt_assert_eqstr(settings.name, "path-to-results");
- igt_assert(!settings.dry_run);
- igt_assert_eq(settings.include_regexes.size, 0);
- igt_assert_eq(settings.exclude_regexes.size, 0);
- igt_assert(!settings.sync);
- igt_assert_eq(settings.log_level, LOG_LEVEL_NORMAL);
- igt_assert(!settings.overwrite);
- igt_assert(!settings.multiple_mode);
- igt_assert_eq(settings.inactivity_timeout, 0);
- igt_assert_eq(settings.overall_timeout, 0);
- igt_assert(!settings.use_watchdog);
- igt_assert(strstr(settings.test_root, "test-root-dir") != NULL);
- igt_assert(strstr(settings.results_path, "path-to-results") != NULL);
-
- igt_assert(!settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 4);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+
+ igt_assert_eq(settings->abort_mask, 0);
+ igt_assert(!settings->test_list);
+ igt_assert_eqstr(settings->name, "path-to-results");
+ igt_assert(!settings->dry_run);
+ igt_assert_eq(settings->include_regexes.size, 0);
+ igt_assert_eq(settings->exclude_regexes.size, 0);
+ igt_assert(!settings->sync);
+ igt_assert_eq(settings->log_level, LOG_LEVEL_NORMAL);
+ igt_assert(!settings->overwrite);
+ igt_assert(!settings->multiple_mode);
+ igt_assert_eq(settings->inactivity_timeout, 0);
+ igt_assert_eq(settings->overall_timeout, 0);
+ igt_assert(!settings->use_watchdog);
+ igt_assert(strstr(settings->test_root, "test-root-dir") != NULL);
+ igt_assert(strstr(settings->results_path, "path-to-results") != NULL);
+
+ igt_assert(!settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 4);
}
igt_subtest_group {
@@ -262,10 +291,10 @@ igt_main
}
igt_subtest_group {
+ const char tmptestlist[] = "tmp.testlist";
char dirname[] = "tmpdirXXXXXX";
- char tmptestlist[] = "tmp.testlist";
char pathtotestlist[64];
- char *path;
+ volatile char *path;
igt_fixture {
int dirfd, fd;
@@ -290,19 +319,19 @@ igt_main
dirname,
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
path = realpath(testdatadir, NULL);
igt_assert(path != NULL);
- igt_assert_eqstr(settings.test_root, path);
- free(path);
+ igt_assert_eqstr(settings->test_root, (char*)path);
+ free((void*)path);
path = realpath(dirname, NULL);
igt_assert(path != NULL);
- igt_assert_eqstr(settings.results_path, path);
- free(path);
+ igt_assert_eqstr(settings->results_path, (char*)path);
+ free((void*)path);
path = realpath(pathtotestlist, NULL);
igt_assert(path != NULL);
- igt_assert_eqstr(settings.test_list, path);
+ igt_assert_eqstr(settings->test_list, (char*)path);
}
igt_fixture {
@@ -313,7 +342,7 @@ igt_main
close(dirfd);
rmdir(dirname);
- free(path);
+ free((void*)path);
}
}
@@ -324,24 +353,24 @@ igt_main
};
setenv("IGT_TEST_ROOT", testdatadir, 1);
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
-
- igt_assert_eq(settings.abort_mask, 0);
- igt_assert(!settings.test_list);
- igt_assert_eqstr(settings.name, "path-to-results");
- igt_assert(!settings.dry_run);
- igt_assert_eq(settings.include_regexes.size, 0);
- igt_assert_eq(settings.exclude_regexes.size, 0);
- igt_assert(!settings.sync);
- igt_assert_eq(settings.log_level, LOG_LEVEL_NORMAL);
- igt_assert(!settings.overwrite);
- igt_assert(!settings.multiple_mode);
- igt_assert_eq(settings.inactivity_timeout, 0);
- igt_assert_eq(settings.overall_timeout, 0);
- igt_assert(!settings.use_watchdog);
- igt_assert(strstr(settings.test_root, testdatadir) != NULL);
- igt_assert(strstr(settings.results_path, "path-to-results") != NULL);
- igt_assert(!settings.piglit_style_dmesg);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+
+ igt_assert_eq(settings->abort_mask, 0);
+ igt_assert(!settings->test_list);
+ igt_assert_eqstr(settings->name, "path-to-results");
+ igt_assert(!settings->dry_run);
+ igt_assert_eq(settings->include_regexes.size, 0);
+ igt_assert_eq(settings->exclude_regexes.size, 0);
+ igt_assert(!settings->sync);
+ igt_assert_eq(settings->log_level, LOG_LEVEL_NORMAL);
+ igt_assert(!settings->overwrite);
+ igt_assert(!settings->multiple_mode);
+ igt_assert_eq(settings->inactivity_timeout, 0);
+ igt_assert_eq(settings->overall_timeout, 0);
+ igt_assert(!settings->use_watchdog);
+ igt_assert(strstr(settings->test_root, testdatadir) != NULL);
+ igt_assert(strstr(settings->results_path, "path-to-results") != NULL);
+ igt_assert(!settings->piglit_style_dmesg);
}
igt_fixture {
@@ -349,6 +378,7 @@ igt_main
}
igt_subtest("parse-all-settings") {
+ char blacklist_name[PATH_MAX], blacklist2_name[PATH_MAX];
const char *argv[] = { "runner",
"-n", "foo",
"--abort-on-monitored-error=taint,lockdep",
@@ -359,6 +389,8 @@ igt_main
"-t", "pattern2",
"-x", "xpattern1",
"-x", "xpattern2",
+ "-b", blacklist_name,
+ "--blacklist", blacklist2_name,
"-s",
"-l", "verbose",
"--overwrite",
@@ -372,30 +404,43 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
-
- igt_assert_eq(settings.abort_mask, ABORT_TAINT | ABORT_LOCKDEP);
- igt_assert(strstr(settings.test_list, "path-to-test-list") != NULL);
- igt_assert_eqstr(settings.name, "foo");
- igt_assert(settings.dry_run);
- igt_assert_eq(settings.include_regexes.size, 2);
- igt_assert_eqstr(settings.include_regexes.regex_strings[0], "pattern1");
- igt_assert_eqstr(settings.include_regexes.regex_strings[1], "pattern2");
- igt_assert_eq(settings.exclude_regexes.size, 2);
- igt_assert_eqstr(settings.exclude_regexes.regex_strings[0], "xpattern1");
- igt_assert_eqstr(settings.exclude_regexes.regex_strings[1], "xpattern2");
- igt_assert(settings.sync);
- igt_assert_eq(settings.log_level, LOG_LEVEL_VERBOSE);
- igt_assert(settings.overwrite);
- igt_assert(settings.multiple_mode);
- igt_assert_eq(settings.inactivity_timeout, 27);
- igt_assert_eq(settings.overall_timeout, 360);
- igt_assert(settings.use_watchdog);
- igt_assert(strstr(settings.test_root, "test-root-dir") != NULL);
- igt_assert(strstr(settings.results_path, "path-to-results") != NULL);
-
- igt_assert(settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 3);
+ sprintf(blacklist_name, "%s/test-blacklist.txt", testdatadir);
+ sprintf(blacklist2_name, "%s/test-blacklist2.txt", testdatadir);
+
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+
+ igt_assert_eq(settings->abort_mask, ABORT_TAINT | ABORT_LOCKDEP);
+ igt_assert(strstr(settings->test_list, "path-to-test-list") != NULL);
+ igt_assert_eqstr(settings->name, "foo");
+ igt_assert(settings->dry_run);
+ igt_assert_eq(settings->include_regexes.size, 2);
+ igt_assert_eqstr(settings->include_regexes.regex_strings[0], "pattern1");
+ igt_assert_eqstr(settings->include_regexes.regex_strings[1], "pattern2");
+ igt_assert_eq(settings->exclude_regexes.size, 4);
+ igt_assert_eqstr(settings->exclude_regexes.regex_strings[0], "xpattern1");
+ igt_assert_eqstr(settings->exclude_regexes.regex_strings[1], "xpattern2");
+ igt_assert_eqstr(settings->exclude_regexes.regex_strings[2], "xpattern3"); /* From blacklist */
+ igt_assert_eqstr(settings->exclude_regexes.regex_strings[3], "xpattern4"); /* From blacklist2 */
+ igt_assert(settings->sync);
+ igt_assert_eq(settings->log_level, LOG_LEVEL_VERBOSE);
+ igt_assert(settings->overwrite);
+ igt_assert(settings->multiple_mode);
+ igt_assert_eq(settings->inactivity_timeout, 27);
+ igt_assert_eq(settings->overall_timeout, 360);
+ igt_assert(settings->use_watchdog);
+ igt_assert(strstr(settings->test_root, "test-root-dir") != NULL);
+ igt_assert(strstr(settings->results_path, "path-to-results") != NULL);
+
+ igt_assert(settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 3);
+ }
+ igt_subtest("parse-list-all") {
+ const char *argv[] = { "runner",
+ "--list-all",
+ "test-root-dir"};
+
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->list_all, 1);
}
igt_subtest("dmesg-warn-level-inferred") {
@@ -404,10 +449,10 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert(!settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 4);
+ igt_assert(!settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 4);
}
igt_subtest("dmesg-warn-level-inferred-with-piglit-style") {
@@ -417,10 +462,10 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert(settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 5);
+ igt_assert(settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 5);
}
igt_subtest("dmesg-warn-level-overridable-with-piglit-style") {
@@ -431,10 +476,10 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert(settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 3);
+ igt_assert(settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 3);
}
igt_subtest("invalid-option") {
@@ -444,14 +489,14 @@ igt_main
"results-path",
};
- igt_assert(!parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(!parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
}
igt_subtest("paths-missing") {
const char *argv[] = { "runner",
"-o",
};
- igt_assert(!parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(!parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
}
igt_subtest("log-levels") {
@@ -461,16 +506,16 @@ igt_main
"results-path",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.log_level, LOG_LEVEL_NORMAL);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->log_level, LOG_LEVEL_NORMAL);
argv[2] = "quiet";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.log_level, LOG_LEVEL_QUIET);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->log_level, LOG_LEVEL_QUIET);
argv[2] = "verbose";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.log_level, LOG_LEVEL_VERBOSE);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->log_level, LOG_LEVEL_VERBOSE);
}
igt_subtest("abort-conditions") {
@@ -480,35 +525,35 @@ igt_main
"results-path",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.abort_mask, ABORT_TAINT);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->abort_mask, ABORT_TAINT);
argv[1] = "--abort-on-monitored-error=lockdep";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.abort_mask, ABORT_LOCKDEP);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->abort_mask, ABORT_LOCKDEP);
argv[1] = "--abort-on-monitored-error=taint";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.abort_mask, ABORT_TAINT);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->abort_mask, ABORT_TAINT);
argv[1] = "--abort-on-monitored-error=lockdep,taint";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.abort_mask, ABORT_TAINT | ABORT_LOCKDEP);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->abort_mask, ABORT_TAINT | ABORT_LOCKDEP);
argv[1] = "--abort-on-monitored-error=taint,lockdep";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.abort_mask, ABORT_TAINT | ABORT_LOCKDEP);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->abort_mask, ABORT_TAINT | ABORT_LOCKDEP);
argv[1] = "--abort-on-monitored-error=all";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.abort_mask, ABORT_ALL);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->abort_mask, ABORT_ALL);
argv[1] = "--abort-on-monitored-error=";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert_eq(settings.abort_mask, 0);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert_eq(settings->abort_mask, 0);
argv[1] = "--abort-on-monitored-error=doesnotexist";
- igt_assert(!parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(!parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
}
@@ -520,29 +565,29 @@ igt_main
"results-path",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert_eqstr(settings.name, "foo");
- igt_assert(settings.dry_run);
- igt_assert(!settings.test_list);
- igt_assert(!settings.sync);
+ igt_assert_eqstr(settings->name, "foo");
+ igt_assert(settings->dry_run);
+ igt_assert(!settings->test_list);
+ igt_assert(!settings->sync);
argv[1] = "--test-list";
argv[3] = "--sync";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert_eqstr(settings.name, "results-path");
- igt_assert(!settings.dry_run);
- igt_assert(strstr(settings.test_list, "foo") != NULL);
- igt_assert(settings.sync);
+ igt_assert_eqstr(settings->name, "results-path");
+ igt_assert(!settings->dry_run);
+ igt_assert(strstr(settings->test_list, "foo") != NULL);
+ igt_assert(settings->sync);
}
igt_subtest_group {
char filename[] = "tmplistXXXXXX";
- int fd = -1;
igt_fixture {
+ int fd;
igt_require((fd = mkstemp(filename)) >= 0);
close(fd);
}
@@ -554,9 +599,9 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert(validate_settings(&settings));
+ igt_assert(validate_settings(settings));
}
igt_fixture {
@@ -573,18 +618,18 @@ igt_main
};
igt_assert_lt(open(nosuchfile, O_RDONLY), 0);
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert(!validate_settings(&settings));
+ igt_assert(!validate_settings(settings));
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- struct job_list list;
+ struct job_list *list = malloc(sizeof(*list));
igt_fixture {
igt_require(mkdtemp(dirname) != NULL);
- init_job_list(&list);
+ init_job_list(list);
}
igt_subtest("job-list-no-test-list-txt") {
@@ -593,14 +638,15 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert(!create_job_list(&list, &settings));
+ igt_assert(!create_job_list(list, settings));
}
igt_fixture {
rmdir(dirname);
- free_job_list(&list);
+ free_job_list(list);
+ free(list);
}
}
@@ -614,17 +660,18 @@ igt_main
igt_subtest_group {
char filename[] = "tmplistXXXXXX";
- char testlisttext[] = "igt@successtest@first-subtest\n"
+ const char testlisttext[] = "igt@successtest@first-subtest\n"
"igt@successtest@second-subtest\n"
"igt@nosubtests\n";
- int fd = -1, multiple;
- struct job_list list;
+ int multiple;
+ struct job_list *list = malloc(sizeof(*list));
igt_fixture {
+ int fd;
igt_require((fd = mkstemp(filename)) >= 0);
igt_require(write(fd, testlisttext, strlen(testlisttext)) == strlen(testlisttext));
close(fd);
- init_job_list(&list);
+ init_job_list(list);
}
for (multiple = 0; multiple < 2; multiple++) {
@@ -636,21 +683,21 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
- igt_assert_eq(list.size, multiple ? 2 : 3);
+ igt_assert_eq(list->size, multiple ? 2 : 3);
- igt_assert_eqstr(list.entries[0].binary, "successtest");
- if (!multiple) igt_assert_eqstr(list.entries[1].binary, "successtest");
- igt_assert_eqstr(list.entries[multiple ? 1 : 2].binary, "nosubtests");
+ igt_assert_eqstr(list->entries[0].binary, "successtest");
+ if (!multiple) igt_assert_eqstr(list->entries[1].binary, "successtest");
+ igt_assert_eqstr(list->entries[multiple ? 1 : 2].binary, "nosubtests");
- igt_assert_eq(list.entries[0].subtest_count, multiple ? 2 : 1);
- igt_assert_eq(list.entries[1].subtest_count, multiple ? 0 : 1);
- if (!multiple) igt_assert_eq(list.entries[2].subtest_count, 0);
+ igt_assert_eq(list->entries[0].subtest_count, multiple ? 2 : 1);
+ igt_assert_eq(list->entries[1].subtest_count, multiple ? 0 : 1);
+ if (!multiple) igt_assert_eq(list->entries[2].subtest_count, 0);
- igt_assert_eqstr(list.entries[0].subtests[0], "first-subtest");
- igt_assert_eqstr(list.entries[multiple ? 0 : 1].subtests[multiple ? 1 : 0], "second-subtest");
+ igt_assert_eqstr(list->entries[0].subtests[0], "first-subtest");
+ igt_assert_eqstr(list->entries[multiple ? 0 : 1].subtests[multiple ? 1 : 0], "second-subtest");
}
igt_subtest_f("job-list-testlist-filtered-%s", multiple ? "multiple" : "normal") {
@@ -663,32 +710,33 @@ igt_main
"path-to-results",
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
- igt_assert_eq(list.size, 1);
- igt_assert_eqstr(list.entries[0].binary, "successtest");
+ igt_assert_eq(list->size, 1);
+ igt_assert_eqstr(list->entries[0].binary, "successtest");
- igt_assert_eq(list.entries[0].subtest_count, 1);
- igt_assert_eqstr(list.entries[0].subtests[0], "second-subtest");
+ igt_assert_eq(list->entries[0].subtest_count, 1);
+ igt_assert_eqstr(list->entries[0].subtests[0], "second-subtest");
}
}
igt_fixture {
unlink(filename);
- free_job_list(&list);
+ free_job_list(list);
+ free(list);
}
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- int dirfd = -1, fd = -1;
- struct settings cmp_settings;
+ volatile int dirfd = -1, fd = -1;
+ struct settings *cmp_settings = malloc(sizeof(*cmp_settings));
igt_fixture {
igt_require(mkdtemp(dirname) != NULL);
rmdir(dirname);
- init_settings(&cmp_settings);
+ init_settings(cmp_settings);
}
igt_subtest("settings-serialize") {
@@ -714,9 +762,9 @@ igt_main
dirname,
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
- igt_assert(serialize_settings(&settings));
+ igt_assert(serialize_settings(settings));
dirfd = open(dirname, O_DIRECTORY, O_RDONLY);
igt_assert_f(dirfd >= 0, "Serialization did not create the results directory\n");
@@ -725,27 +773,31 @@ igt_main
"Opening %s/metadata.txt failed\n", dirname);
close(fd);
- igt_assert_f(read_settings_from_dir(&cmp_settings, dirfd), "Reading settings failed\n");
- assert_settings_equal(&settings, &cmp_settings);
+ igt_assert_f(read_settings_from_dir(cmp_settings, dirfd), "Reading settings failed\n");
+ assert_settings_equal(settings, cmp_settings);
}
igt_fixture {
close(fd);
close(dirfd);
clear_directory(dirname);
- free_settings(&cmp_settings);
+ free_settings(cmp_settings);
+ free(cmp_settings);
}
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- int dirfd = -1, fd = -1;
- struct job_list list, cmp_list;
+ volatile int dirfd = -1, fd = -1;
+ struct job_list *list, *cmp_list;
int multiple;
+ list = malloc(sizeof(*list));
+ cmp_list = malloc(sizeof(*cmp_list));
+
igt_fixture {
- init_job_list(&list);
- init_job_list(&cmp_list);
+ init_job_list(list);
+ init_job_list(cmp_list);
igt_require(mkdtemp(dirname) != NULL);
rmdir(dirname);
}
@@ -759,11 +811,11 @@ igt_main
dirname,
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
- igt_assert(serialize_settings(&settings));
- igt_assert(serialize_job_list(&list, &settings));
+ igt_assert(serialize_settings(settings));
+ igt_assert(serialize_job_list(list, settings));
dirfd = open(dirname, O_DIRECTORY, O_RDONLY);
igt_assert_f(dirfd >= 0, "Serialization did not create the results directory\n");
@@ -773,27 +825,32 @@ igt_main
close(fd);
fd = -1;
- igt_assert_f(read_job_list(&cmp_list, dirfd), "Reading job list failed\n");
- assert_job_list_equal(&list, &cmp_list);
+ igt_assert_f(read_job_list(cmp_list, dirfd), "Reading job list failed\n");
+ assert_job_list_equal(list, cmp_list);
}
igt_fixture {
close(fd);
close(dirfd);
clear_directory(dirname);
- free_job_list(&cmp_list);
- free_job_list(&list);
+ free_job_list(cmp_list);
+ free_job_list(list);
}
}
+
+ igt_fixture {
+ free(cmp_list);
+ free(list);
+ }
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- struct job_list list;
- int dirfd = -1, subdirfd = -1, fd = -1;
+ struct job_list *list = malloc(sizeof(*list));
+ volatile int dirfd = -1, subdirfd = -1, fd = -1;
igt_fixture {
- init_job_list(&list);
+ init_job_list(list);
igt_require(mkdtemp(dirname) != NULL);
rmdir(dirname);
}
@@ -806,19 +863,19 @@ igt_main
dirname,
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
- igt_assert(initialize_execute_state(&state, &settings, &list));
+ igt_assert(initialize_execute_state(&state, settings, list));
igt_assert_eq(state.next, 0);
igt_assert(state.dry);
- igt_assert_eq(list.size, 5);
+ igt_assert_eq(list->size, 5);
igt_assert_f((dirfd = open(dirname, O_DIRECTORY | O_RDONLY)) >= 0,
"Dry run initialization didn't create the results directory.\n");
/* Execute from just initialize_execute_state should fail */
- igt_assert(execute(&state, &settings, &list));
+ igt_assert(execute(&state, settings, list));
igt_assert_f(openat(dirfd, "0", O_DIRECTORY | O_RDONLY) < 0,
"Dry run executed when it should not have.\n");
igt_assert_f((fd = openat(dirfd, "metadata.txt", O_RDONLY)) >= 0,
@@ -830,16 +887,16 @@ igt_main
igt_assert_f((fd = openat(dirfd, "uname.txt", O_RDONLY)) < 0,
"Dry run initialization created uname.txt.\n");
- igt_assert(initialize_execute_state_from_resume(dirfd, &state, &settings, &list));
+ igt_assert(initialize_execute_state_from_resume(dirfd, &state, settings, list));
igt_assert_eq(state.next, 0);
igt_assert(!state.dry);
- igt_assert_eq(list.size, 5);
+ igt_assert_eq(list->size, 5);
/* initialize_execute_state_from_resume() closes the dirfd */
igt_assert_f((dirfd = open(dirname, O_DIRECTORY | O_RDONLY)) >= 0,
"Dry run resume somehow deleted the results directory.\n");
/* Execute from resume should work */
- igt_assert(execute(&state, &settings, &list));
+ igt_assert(execute(&state, settings, list));
igt_assert_f((fd = openat(dirfd, "uname.txt", O_RDONLY)) >= 0,
"Dry run resume didn't create uname.txt.\n");
close(fd);
@@ -854,17 +911,18 @@ igt_main
close(dirfd);
close(subdirfd);
clear_directory(dirname);
- free_job_list(&list);
+ free_job_list(list);
+ free(list);
}
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- struct job_list list;
- int dirfd = -1, fd = -1;
+ struct job_list *list = malloc(sizeof(*list));
+ volatile int dirfd = -1, fd = -1;
igt_fixture {
- init_job_list(&list);
+ init_job_list(list);
igt_require(mkdtemp(dirname) != NULL);
rmdir(dirname);
}
@@ -876,13 +934,13 @@ igt_main
dirname,
};
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
- igt_assert(initialize_execute_state(&state, &settings, &list));
+ igt_assert(initialize_execute_state(&state, settings, list));
igt_assert_eq(state.next, 0);
- igt_assert_eq(list.size, 5);
+ igt_assert_eq(list->size, 5);
igt_assert_f((dirfd = open(dirname, O_DIRECTORY | O_RDONLY)) >= 0,
"Execute state initialization didn't create the results directory.\n");
igt_assert_f((fd = openat(dirfd, "metadata.txt", O_RDONLY)) >= 0,
@@ -901,17 +959,18 @@ igt_main
close(fd);
close(dirfd);
clear_directory(dirname);
- free_job_list(&list);
+ free_job_list(list);
+ free(list);
}
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- struct job_list list;
- int dirfd = -1, subdirfd = -1, fd = -1;
+ struct job_list *list = malloc(sizeof(*list));
+ volatile int dirfd = -1, subdirfd = -1, fd = -1;
igt_fixture {
- init_job_list(&list);
+ init_job_list(list);
igt_require(mkdtemp(dirname) != NULL);
}
@@ -923,16 +982,16 @@ igt_main
testdatadir,
dirname,
};
- char journaltext[] = "first-subtest\n";
- char excludestring[] = "!first-subtest";
+ const char journaltext[] = "first-subtest\n";
+ const char excludestring[] = "!first-subtest";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
- igt_assert(list.size == 1);
- igt_assert(list.entries[0].subtest_count == 0);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
+ igt_assert(list->size == 1);
+ igt_assert(list->entries[0].subtest_count == 0);
- igt_assert(serialize_settings(&settings));
- igt_assert(serialize_job_list(&list, &settings));
+ igt_assert(serialize_settings(settings));
+ igt_assert(serialize_job_list(list, settings));
igt_assert((dirfd = open(dirname, O_DIRECTORY | O_RDONLY)) >= 0);
igt_assert(mkdirat(dirfd, "0", 0770) == 0);
@@ -940,15 +999,15 @@ igt_main
igt_assert((fd = openat(subdirfd, "journal.txt", O_CREAT | O_WRONLY | O_EXCL, 0660)) >= 0);
igt_assert(write(fd, journaltext, strlen(journaltext)) == strlen(journaltext));
- free_job_list(&list);
- free_settings(&settings);
- igt_assert(initialize_execute_state_from_resume(dirfd, &state, &settings, &list));
+ free_job_list(list);
+ free_settings(settings);
+ igt_assert(initialize_execute_state_from_resume(dirfd, &state, settings, list));
igt_assert_eq(state.next, 0);
- igt_assert_eq(list.size, 1);
- igt_assert_eq(list.entries[0].subtest_count, 2);
- igt_assert_eqstr(list.entries[0].subtests[0], "*");
- igt_assert_eqstr(list.entries[0].subtests[1], excludestring);
+ igt_assert_eq(list->size, 1);
+ igt_assert_eq(list->entries[0].subtest_count, 2);
+ igt_assert_eqstr(list->entries[0].subtests[0], "*");
+ igt_assert_eqstr(list->entries[0].subtests[1], excludestring);
}
igt_fixture {
@@ -956,17 +1015,18 @@ igt_main
close(subdirfd);
close(dirfd);
clear_directory(dirname);
- free_job_list(&list);
+ free_job_list(list);
+ free(list);
}
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- struct job_list list;
- int dirfd = -1, subdirfd = -1, fd = -1;
+ struct job_list *list = malloc(sizeof(*list));
+ volatile int dirfd = -1, subdirfd = -1, fd = -1;
igt_fixture {
- init_job_list(&list);
+ init_job_list(list);
igt_require(mkdtemp(dirname) != NULL);
}
@@ -979,15 +1039,15 @@ igt_main
testdatadir,
dirname,
};
- char journaltext[] = "first-subtest\nsecond-subtest\n";
+ const char journaltext[] = "first-subtest\nsecond-subtest\n";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
- igt_assert(list.size == 1);
- igt_assert(list.entries[0].subtest_count == 2);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
+ igt_assert(list->size == 1);
+ igt_assert(list->entries[0].subtest_count == 2);
- igt_assert(serialize_settings(&settings));
- igt_assert(serialize_job_list(&list, &settings));
+ igt_assert(serialize_settings(settings));
+ igt_assert(serialize_job_list(list, settings));
igt_assert((dirfd = open(dirname, O_DIRECTORY | O_RDONLY)) >= 0);
igt_assert(mkdirat(dirfd, "0", 0770) == 0);
@@ -995,14 +1055,14 @@ igt_main
igt_assert((fd = openat(subdirfd, "journal.txt", O_CREAT | O_WRONLY | O_EXCL, 0660)) >= 0);
igt_assert(write(fd, journaltext, strlen(journaltext)) == strlen(journaltext));
- free_job_list(&list);
- free_settings(&settings);
- igt_assert(initialize_execute_state_from_resume(dirfd, &state, &settings, &list));
+ free_job_list(list);
+ free_settings(settings);
+ igt_assert(initialize_execute_state_from_resume(dirfd, &state, settings, list));
/* All subtests are in journal, the entry should be considered completed */
igt_assert_eq(state.next, 1);
- igt_assert_eq(list.size, 1);
- igt_assert_eq(list.entries[0].subtest_count, 4);
+ igt_assert_eq(list->size, 1);
+ igt_assert_eq(list->entries[0].subtest_count, 4);
}
igt_fixture {
@@ -1010,17 +1070,18 @@ igt_main
close(subdirfd);
close(dirfd);
clear_directory(dirname);
- free_job_list(&list);
+ free_job_list(list);
+ free(list);
}
}
igt_subtest_group {
char dirname[] = "tmpdirXXXXXX";
- struct job_list list;
- int dirfd = -1, subdirfd = -1, fd = -1;
+ struct job_list *list = malloc(sizeof(*list));
+ volatile int dirfd = -1, subdirfd = -1, fd = -1;
igt_fixture {
- init_job_list(&list);
+ init_job_list(list);
igt_require(mkdtemp(dirname) != NULL);
}
@@ -1031,22 +1092,22 @@ igt_main
testdatadir,
dirname,
};
- char journaltext[] = "first-subtest\nsecond-subtest\nexit:0\n";
+ const char journaltext[] = "first-subtest\nsecond-subtest\nexit:0\n";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
- igt_assert(list.size == 3);
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
+ igt_assert(list->size == 3);
- if (!strcmp(list.entries[0].binary, "no-subtests")) {
- struct job_list_entry tmp = list.entries[0];
- list.entries[0] = list.entries[1];
- list.entries[1] = tmp;
+ if (!strcmp(list->entries[0].binary, "no-subtests")) {
+ struct job_list_entry tmp = list->entries[0];
+ list->entries[0] = list->entries[1];
+ list->entries[1] = tmp;
}
- igt_assert(list.entries[0].subtest_count == 0);
+ igt_assert(list->entries[0].subtest_count == 0);
- igt_assert(serialize_settings(&settings));
- igt_assert(serialize_job_list(&list, &settings));
+ igt_assert(serialize_settings(settings));
+ igt_assert(serialize_job_list(list, settings));
igt_assert_lte(0, dirfd = open(dirname, O_DIRECTORY | O_RDONLY));
igt_assert_eq(mkdirat(dirfd, "0", 0770), 0);
@@ -1054,12 +1115,12 @@ igt_main
igt_assert_lte(0, fd = openat(subdirfd, "journal.txt", O_CREAT | O_WRONLY | O_EXCL, 0660));
igt_assert_eq(write(fd, journaltext, sizeof(journaltext)), sizeof(journaltext));
- free_job_list(&list);
- free_settings(&settings);
- igt_assert(initialize_execute_state_from_resume(dirfd, &state, &settings, &list));
+ free_job_list(list);
+ free_settings(settings);
+ igt_assert(initialize_execute_state_from_resume(dirfd, &state, settings, list));
igt_assert_eq(state.next, 1);
- igt_assert_eq(list.size, 3);
+ igt_assert_eq(list->size, 3);
}
igt_fixture {
@@ -1067,17 +1128,18 @@ igt_main
close(subdirfd);
close(dirfd);
clear_directory(dirname);
- free_job_list(&list);
+ free_job_list(list);
+ free(list);
}
}
igt_subtest_group {
- struct job_list list;
- int dirfd = -1, subdirfd = -1, fd = -1;
+ struct job_list *list = malloc(sizeof(*list));
+ volatile int dirfd = -1, subdirfd = -1, fd = -1;
int multiple;
igt_fixture {
- init_job_list(&list);
+ init_job_list(list);
}
for (multiple = 0; multiple < 2; multiple++) {
@@ -1100,11 +1162,11 @@ igt_main
size_t expected_tests = multiple ? 2 : 3;
size_t i;
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
- igt_assert(initialize_execute_state(&state, &settings, &list));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
+ igt_assert(initialize_execute_state(&state, settings, list));
- igt_assert(execute(&state, &settings, &list));
+ igt_assert(execute(&state, settings, list));
igt_assert_f((dirfd = open(dirname, O_DIRECTORY | O_RDONLY)) >= 0,
"Execute didn't create the results directory\n");
@@ -1132,9 +1194,12 @@ igt_main
close(subdirfd);
close(dirfd);
clear_directory(dirname);
- free_job_list(&list);
+ free_job_list(list);
}
}
+
+ igt_fixture
+ free(list);
}
igt_subtest_group {
@@ -1143,10 +1208,10 @@ igt_main
FILE *f = fmemopen(metadata, strlen(metadata), "r");
igt_assert(f);
- igt_assert(read_settings_from_file(&settings, f));
+ igt_assert(read_settings_from_file(settings, f));
- igt_assert(settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 5);
+ igt_assert(settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 5);
fclose(f);
}
@@ -1156,10 +1221,10 @@ igt_main
FILE *f = fmemopen(metadata, strlen(metadata), "r");
igt_assert(f);
- igt_assert(read_settings_from_file(&settings, f));
+ igt_assert(read_settings_from_file(settings, f));
- igt_assert(settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 3);
+ igt_assert(settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 3);
fclose(f);
}
@@ -1169,10 +1234,10 @@ igt_main
FILE *f = fmemopen(metadata, strlen(metadata), "r");
igt_assert(f);
- igt_assert(read_settings_from_file(&settings, f));
+ igt_assert(read_settings_from_file(settings, f));
- igt_assert(!settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 4);
+ igt_assert(!settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 4);
fclose(f);
}
@@ -1182,22 +1247,22 @@ igt_main
FILE *f = fmemopen(metadata, strlen(metadata), "r");
igt_assert(f);
- igt_assert(read_settings_from_file(&settings, f));
+ igt_assert(read_settings_from_file(settings, f));
- igt_assert(!settings.piglit_style_dmesg);
- igt_assert_eq(settings.dmesg_warn_level, 3);
+ igt_assert(!settings->piglit_style_dmesg);
+ igt_assert_eq(settings->dmesg_warn_level, 3);
fclose(f);
}
}
igt_subtest_group {
- struct job_list list;
- int dirfd = -1, subdirfd = -1, fd = -1;
+ struct job_list *list = malloc(sizeof(*list));
+ volatile int dirfd = -1, subdirfd = -1, fd = -1;
int multiple;
igt_fixture {
- init_job_list(&list);
+ init_job_list(list);
}
for (multiple = 0; multiple < 2; multiple++) {
@@ -1222,11 +1287,11 @@ igt_main
"skip-one\nexit:77 (";
const char *expected_1 = "skip-two\nexit:77 (";
- igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, &settings));
- igt_assert(create_job_list(&list, &settings));
- igt_assert(initialize_execute_state(&state, &settings, &list));
+ igt_assert(parse_options(ARRAY_SIZE(argv), (char**)argv, settings));
+ igt_assert(create_job_list(list, settings));
+ igt_assert(initialize_execute_state(&state, settings, list));
- igt_assert(execute(&state, &settings, &list));
+ igt_assert(execute(&state, settings, list));
igt_assert_f((dirfd = open(dirname, O_DIRECTORY | O_RDONLY)) >= 0,
"Execute didn't create the results directory\n");
@@ -1266,9 +1331,12 @@ igt_main
close(subdirfd);
close(dirfd);
clear_directory(dirname);
- free_job_list(&list);
+ free_job_list(list);
}
}
+
+ igt_fixture
+ free(list);
}
igt_subtest("file-descriptor-leakage") {
@@ -1289,6 +1357,8 @@ igt_main
}
}
- igt_fixture
- free_settings(&settings);
+ igt_fixture {
+ free_settings(settings);
+ free(settings);
+ }
}
diff --git a/runner/settings.c b/runner/settings.c
index 25bcf531..9920e1a6 100644
--- a/runner/settings.c
+++ b/runner/settings.c
@@ -1,5 +1,6 @@
#include "settings.h"
+#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
@@ -30,6 +31,8 @@ enum {
OPT_MULTIPLE = 'm',
OPT_TIMEOUT = 'c',
OPT_WATCHDOG = 'g',
+ OPT_BLACKLIST = 'b',
+ OPT_LIST_ALL = 'L',
};
static struct {
@@ -117,7 +120,8 @@ static bool parse_abort_conditions(struct settings *settings, const char *optarg
}
static const char *usage_str =
- "usage: runner [options] [test_root] results-path\n\n"
+ "usage: runner [options] [test_root] results-path\n"
+ " or: runner --list-all [options] [test_root]\n\n"
"Options:\n"
" Piglit compatible:\n"
" -h, --help Show this help message and exit\n"
@@ -172,6 +176,10 @@ static const char *usage_str =
" (longer) filter list means the test result should\n"
" change. KERN_NOTICE dmesg level is treated as warn,\n"
" unless overridden with --dmesg-warn-level.\n"
+ " -b, --blacklist FILENAME\n"
+ " Exclude all test matching to regexes from FILENAME\n"
+ " (can be used more than once)\n"
+ " -L, --list-all List all matching subtests instead of running\n"
" [test_root] Directory that contains the IGT tests. The environment\n"
" variable IGT_TEST_ROOT will be used if set, overriding\n"
" this option if given.\n"
@@ -187,23 +195,18 @@ static void usage(const char *extra_message, FILE *f)
static bool add_regex(struct regex_list *list, char *new)
{
- regex_t *regex;
- size_t buflen;
- char *buf;
- int s;
-
- regex = malloc(sizeof(*regex));
-
- if ((s = regcomp(regex, new,
- REG_EXTENDED | REG_NOSUB)) != 0) {
- buflen = regerror(s, regex, NULL, 0);
- buf = malloc(buflen);
- regerror(s, regex, buf, buflen);
+ GRegex *regex;
+ GError *error = NULL;
+
+ regex = g_regex_new(new, G_REGEX_OPTIMIZE, 0, &error);
+ if (error) {
+ char *buf = malloc(snprintf(NULL, 0, "Invalid regex '%s': %s", new, error->message) + 1);
+
+ sprintf(buf, "Invalid regex '%s': %s", new, error->message);
usage(buf, stderr);
free(buf);
- regfree(regex);
- free(regex);
+ g_error_free(error);
return false;
}
@@ -218,14 +221,58 @@ static bool add_regex(struct regex_list *list, char *new)
return true;
}
+static bool parse_blacklist(struct regex_list *exclude_regexes,
+ char *blacklist_filename)
+{
+ FILE *f;
+ char *line = NULL;
+ size_t line_len = 0;
+ bool status = true;
+
+ if ((f = fopen(blacklist_filename, "r")) == NULL) {
+ fprintf(stderr, "Cannot open blacklist file %s\n", blacklist_filename);
+ return false;
+ }
+ while (1) {
+ size_t str_size = 0, idx = 0;
+
+ if (getline(&line, &line_len, f) == -1) {
+ if (errno == EINTR)
+ continue;
+ else
+ break;
+ }
+
+ while (true) {
+ if (line[idx] == '\n' ||
+ line[idx] == '\0' ||
+ line[idx] == '#') /* # starts a comment */
+ break;
+ if (!isspace(line[idx]))
+ str_size = idx + 1;
+ idx++;
+ }
+ if (str_size > 0) {
+ char *test_regex = strndup(line, str_size);
+
+ status = add_regex(exclude_regexes, test_regex);
+ if (!status)
+ break;
+ }
+ }
+
+ free(line);
+ fclose(f);
+ return status;
+}
+
static void free_regexes(struct regex_list *regexes)
{
size_t i;
for (i = 0; i < regexes->size; i++) {
free(regexes->regex_strings[i]);
- regfree(regexes->regexes[i]);
- free(regexes->regexes[i]);
+ g_regex_unref(regexes->regexes[i]);
}
free(regexes->regex_strings);
free(regexes->regexes);
@@ -278,6 +325,8 @@ bool parse_options(int argc, char **argv,
{"use-watchdog", no_argument, NULL, OPT_WATCHDOG},
{"piglit-style-dmesg", no_argument, NULL, OPT_PIGLIT_DMESG},
{"dmesg-warn-level", required_argument, NULL, OPT_DMESG_WARN_LEVEL},
+ {"blacklist", required_argument, NULL, OPT_BLACKLIST},
+ {"list-all", no_argument, NULL, OPT_LIST_ALL},
{ 0, 0, 0, 0},
};
@@ -287,7 +336,8 @@ bool parse_options(int argc, char **argv,
settings->dmesg_warn_level = -1;
- while ((c = getopt_long(argc, argv, "hn:dt:x:sl:om", long_options, NULL)) != -1) {
+ while ((c = getopt_long(argc, argv, "hn:dt:x:sl:omb:L",
+ long_options, NULL)) != -1) {
switch (c) {
case OPT_HELP:
usage(NULL, stdout);
@@ -348,6 +398,14 @@ bool parse_options(int argc, char **argv,
case OPT_DMESG_WARN_LEVEL:
settings->dmesg_warn_level = atoi(optarg);
break;
+ case OPT_BLACKLIST:
+ if (!parse_blacklist(&settings->exclude_regexes,
+ absolute_path(optarg)))
+ goto error;
+ break;
+ case OPT_LIST_ALL:
+ settings->list_all = true;
+ break;
case '?':
usage(NULL, stderr);
goto error;
@@ -360,20 +418,40 @@ bool parse_options(int argc, char **argv,
if (settings->dmesg_warn_level < 0)
settings->dmesg_warn_level = 4; /* KERN_WARN */
- switch (argc - optind) {
- case 2:
- settings->test_root = absolute_path(argv[optind]);
- ++optind;
- /* fallthrough */
- case 1:
- settings->results_path = absolute_path(argv[optind]);
- break;
- case 0:
- usage("Results-path missing", stderr);
- goto error;
- default:
- usage("Extra arguments after results-path", stderr);
- goto error;
+ if (settings->list_all) { /* --list-all doesn't require results path */
+ switch (argc - optind) {
+ case 1:
+ settings->test_root = absolute_path(argv[optind]);
+ ++optind;
+ /* fallthrough */
+ case 0:
+ break;
+ default:
+ usage("Too many arguments for --list-all", stderr);
+ goto error;
+ }
+ } else {
+ switch (argc - optind) {
+ case 2:
+ settings->test_root = absolute_path(argv[optind]);
+ ++optind;
+ /* fallthrough */
+ case 1:
+ settings->results_path = absolute_path(argv[optind]);
+ break;
+ case 0:
+ usage("Results-path missing", stderr);
+ goto error;
+ default:
+ usage("Extra arguments after results-path", stderr);
+ goto error;
+ }
+ if (!settings->name) {
+ char *name = strdup(settings->results_path);
+
+ settings->name = strdup(basename(name));
+ free(name);
+ }
}
if ((env_test_root = getenv("IGT_TEST_ROOT")) != NULL) {
@@ -386,11 +464,6 @@ bool parse_options(int argc, char **argv,
goto error;
}
- if (!settings->name) {
- char *name = strdup(settings->results_path);
- settings->name = strdup(basename(name));
- free(name);
- }
return true;
diff --git a/runner/settings.h b/runner/settings.h
index 672a3af8..6dcfa8c5 100644
--- a/runner/settings.h
+++ b/runner/settings.h
@@ -4,8 +4,8 @@
#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>
-#include <regex.h>
#include <stdio.h>
+#include <glib.h>
enum {
LOG_LEVEL_NORMAL = 0,
@@ -21,7 +21,7 @@ _Static_assert(ABORT_ALL == (ABORT_TAINT | ABORT_LOCKDEP), "ABORT_ALL must be al
struct regex_list {
char **regex_strings;
- regex_t** regexes;
+ GRegex **regexes;
size_t size;
};
@@ -43,6 +43,7 @@ struct settings {
char *results_path;
bool piglit_style_dmesg;
int dmesg_warn_level;
+ bool list_all;
};
/**
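With struct regex_list now holding compiled GRegex objects, matching a test name against the exclude list reduces to g_regex_match() calls. A minimal sketch under that assumption — is_excluded() is a hypothetical helper for illustration only; the actual filtering lives in runner/job_list.c, which is not part of this excerpt:

#include <stdbool.h>
#include "settings.h"	/* struct regex_list; pulls in <glib.h> */

static bool is_excluded(const struct regex_list *excludes, const char *name)
{
	size_t i;

	for (i = 0; i < excludes->size; i++) {
		/* TRUE as soon as any compiled exclude pattern matches. */
		if (g_regex_match(excludes->regexes[i], name, 0, NULL))
			return true;
	}

	return false;
}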
diff --git a/runner/testdata/meson.build b/runner/testdata/meson.build
index 011eff8e..2456f82a 100644
--- a/runner/testdata/meson.build
+++ b/runner/testdata/meson.build
@@ -12,6 +12,11 @@ foreach prog : testdata_progs
install : false)
endforeach
+configure_file(input : 'test-blacklist.txt',
+ output : 'test-blacklist.txt', copy : true)
+configure_file(input : 'test-blacklist2.txt',
+ output : 'test-blacklist2.txt', copy : true)
+
testdata_list = custom_target('testdata_testlist',
output : 'test-list.txt',
command : [ gen_testlist, '@OUTPUT@', testdata_progs ],
diff --git a/runner/testdata/test-blacklist.txt b/runner/testdata/test-blacklist.txt
new file mode 100644
index 00000000..6b09ae5c
--- /dev/null
+++ b/runner/testdata/test-blacklist.txt
@@ -0,0 +1,2 @@
+xpattern3 # Comment 1
+# Comment 2
diff --git a/runner/testdata/test-blacklist2.txt b/runner/testdata/test-blacklist2.txt
new file mode 100644
index 00000000..d0f6e612
--- /dev/null
+++ b/runner/testdata/test-blacklist2.txt
@@ -0,0 +1,2 @@
+
+xpattern4
diff --git a/scripts/media-bench.pl b/scripts/media-bench.pl
index 066b542f..1cd8205f 100755
--- a/scripts/media-bench.pl
+++ b/scripts/media-bench.pl
@@ -49,10 +49,11 @@ my $nop;
my %opts;
my @balancers = ( 'rr', 'rand', 'qd', 'qdr', 'qdavg', 'rt', 'rtr', 'rtavg',
- 'context', 'busy', 'busy-avg' );
+ 'context', 'busy', 'busy-avg', 'i915' );
my %bal_skip_H = ( 'rr' => 1, 'rand' => 1, 'context' => 1, , 'busy' => 1,
- 'busy-avg' => 1 );
-my %bal_skip_R = ( 'context' => 1 );
+ 'busy-avg' => 1, 'i915' => 1 );
+my %bal_skip_R = ( 'i915' => 1 );
+my %bal_skip_G = ( 'i915' => 1 );
my @workloads = (
'media_load_balance_17i7.wsim',
@@ -498,6 +499,8 @@ foreach my $wrk (@saturation_workloads) {
my $bid;
if ($bal ne '') {
+ next GBAL if $G =~ '-G' and exists $bal_skip_G{$bal};
+
push @xargs, "-b $bal";
push @xargs, '-R' unless exists $bal_skip_R{$bal};
push @xargs, $G if $G ne '';
diff --git a/scripts/trace.pl b/scripts/trace.pl
index 18f9f3b1..77587f24 100755
--- a/scripts/trace.pl
+++ b/scripts/trace.pl
@@ -27,10 +27,16 @@ use warnings;
use 5.010;
my $gid = 0;
-my (%db, %queue, %submit, %notify, %rings, %ctxdb, %ringmap, %reqwait, %ctxtimelines);
+my (%db, %vdb, %queue, %submit, %notify, %rings, %ctxdb, %ringmap, %reqwait,
+ %ctxtimelines);
+my (%cids, %ctxmap);
+my $cid = 0;
+my %queues;
my @freqs;
-my $max_items = 3000;
+use constant VENG => '255:254';
+
+my $max_requests = 1000;
my $width_us = 32000;
my $correct_durations = 0;
my %ignore_ring;
@@ -66,7 +72,7 @@ Notes:
i915:i915_request_submit, \
i915:i915_request_in, \
i915:i915_request_out, \
- i915:intel_engine_notify, \
+ dma_fence:dma_fence_signaled, \
i915:i915_request_wait_begin, \
i915:i915_request_wait_end \
[command-to-be-profiled]
@@ -161,7 +167,7 @@ sub arg_trace
'i915:i915_request_submit',
'i915:i915_request_in',
'i915:i915_request_out',
- 'i915:intel_engine_notify',
+ 'dma_fence:dma_fence_signaled',
'i915:i915_request_wait_begin',
'i915:i915_request_wait_end' );
@@ -180,21 +186,21 @@ sub arg_trace
return @_;
}
-sub arg_max_items
+sub arg_max_requests
{
my $val;
return unless scalar(@_);
- if ($_[0] eq '--max-items' or $_[0] eq '-m') {
+ if ($_[0] eq '--max-requests' or $_[0] eq '-m') {
shift @_;
$val = shift @_;
- } elsif ($_[0] =~ /--max-items=(\d+)/) {
+ } elsif ($_[0] =~ /--max-requests=(\d+)/) {
shift @_;
$val = $1;
}
- $max_items = int($val) if defined $val;
+ $max_requests = int($val) if defined $val;
return @_;
}
@@ -291,7 +297,7 @@ while (@args) {
@args = arg_avg_delay_stats(@args);
@args = arg_gpu_timeline(@args);
@args = arg_trace(@args);
- @args = arg_max_items(@args);
+ @args = arg_max_requests(@args);
@args = arg_zoom_width(@args);
@args = arg_split_requests(@args);
@args = arg_ignore_ring(@args);
@@ -312,11 +318,11 @@ sub db_key
return $ring . '/' . $ctx . '/' . $seqno;
}
-sub global_key
+sub notify_key
{
- my ($ring, $seqno) = @_;
+ my ($ctx, $seqno) = @_;
- return $ring . '/' . $seqno;
+ return $ctx . '/' . $seqno;
}
sub sanitize_ctx
@@ -330,6 +336,13 @@ sub sanitize_ctx
}
}
+sub is_veng
+{
+ my ($class, $instance) = split ':', shift;
+
+ return $instance eq '254';
+}
+
# Main input loop - parse lines and build the internal representation of the
# trace using a hash of requests and some auxiliary data structures.
my $prev_freq = 0;
@@ -372,6 +385,7 @@ while (<>) {
$ctx = $tp{'ctx'};
$orig_ctx = $ctx;
$ctx = sanitize_ctx($ctx, $ring);
+ $ring = VENG if is_veng($ring);
$key = db_key($ring, $ctx, $seqno);
}
}
@@ -380,6 +394,7 @@ while (<>) {
my %rw;
next if exists $reqwait{$key};
+ die if $ring eq VENG and not exists $queues{$ctx};
$rw{'key'} = $key;
$rw{'ring'} = $ring;
@@ -388,9 +403,19 @@ while (<>) {
$rw{'start'} = $time;
$reqwait{$key} = \%rw;
} elsif ($tp_name eq 'i915:i915_request_wait_end:') {
- next unless exists $reqwait{$key};
+ die if $ring eq VENG and not exists $queues{$ctx};
+
+ if (exists $reqwait{$key}) {
+ $reqwait{$key}->{'end'} = $time;
+ } else { # Virtual engine
+ my $vkey = db_key(VENG, $ctx, $seqno);
+
+ die unless exists $reqwait{$vkey};
- $reqwait{$key}->{'end'} = $time;
+ # If the wait started on the virtual engine, attribute
+ # it to it completely.
+ $reqwait{$vkey}->{'end'} = $time;
+ }
} elsif ($tp_name eq 'i915:i915_request_add:') {
if (exists $queue{$key}) {
$ctxdb{$orig_ctx}++;
@@ -401,19 +426,52 @@ while (<>) {
}
$queue{$key} = $time;
+ if ($ring eq VENG and not exists $queues{$ctx}) {
+ $queues{$ctx} = 1;
+ $cids{$ctx} = $cid++;
+ $ctxmap{$cids{$ctx}} = $ctx;
+ }
} elsif ($tp_name eq 'i915:i915_request_submit:') {
die if exists $submit{$key};
die unless exists $queue{$key};
+ die if $ring eq VENG and not exists $queues{$ctx};
$submit{$key} = $time;
} elsif ($tp_name eq 'i915:i915_request_in:') {
+ my ($q, $s);
my %req;
# preemption
delete $db{$key} if exists $db{$key};
- die unless exists $queue{$key};
- die unless exists $submit{$key};
+ unless (exists $queue{$key}) {
+ # Virtual engine
+ my $vkey = db_key(VENG, $ctx, $seqno);
+ my %req;
+
+ die unless exists $queues{$ctx};
+ die unless exists $queue{$vkey};
+ die unless exists $submit{$vkey};
+
+ # Create separate request record on the queue timeline
+ $q = $queue{$vkey};
+ $s = $submit{$vkey};
+ $req{'queue'} = $q;
+ $req{'submit'} = $s;
+ $req{'start'} = $time;
+ $req{'end'} = $time;
+ $req{'ring'} = VENG;
+ $req{'seqno'} = $seqno;
+ $req{'ctx'} = $ctx;
+ $req{'name'} = $ctx . '/' . $seqno;
+ $req{'global'} = $tp{'global'};
+ $req{'port'} = $tp{'port'};
+
+ $vdb{$vkey} = \%req;
+ } else {
+ $q = $queue{$key};
+ $s = $submit{$key};
+ }
$req{'start'} = $time;
$req{'ring'} = $ring;
@@ -423,24 +481,38 @@ while (<>) {
$req{'name'} = $ctx . '/' . $seqno;
$req{'global'} = $tp{'global'};
$req{'port'} = $tp{'port'};
- $req{'queue'} = $queue{$key};
- $req{'submit'} = $submit{$key};
+ $req{'queue'} = $q;
+ $req{'submit'} = $s;
+ $req{'virtual'} = 1 if exists $queues{$ctx};
$rings{$ring} = $gid++ unless exists $rings{$ring};
$ringmap{$rings{$ring}} = $ring;
$db{$key} = \%req;
} elsif ($tp_name eq 'i915:i915_request_out:') {
- my $gkey = global_key($ring, $tp{'global'});
+ if ($tp{'completed?'}) {
+ my $nkey;
- die unless exists $db{$key};
- die unless exists $db{$key}->{'start'};
- die if exists $db{$key}->{'end'};
+ die unless exists $db{$key};
+ die unless exists $db{$key}->{'start'};
+ die if exists $db{$key}->{'end'};
- $db{$key}->{'end'} = $time;
- $db{$key}->{'notify'} = $notify{$gkey} if exists $notify{$gkey};
- } elsif ($tp_name eq 'i915:intel_engine_notify:') {
- my $gkey = global_key($ring, $seqno);
+ $nkey = notify_key($ctx, $seqno);
- $notify{$gkey} = $time unless exists $notify{$gkey};
+ $db{$key}->{'end'} = $time;
+ $db{$key}->{'notify'} = $notify{$nkey}
+ if exists $notify{$nkey};
+ } else {
+ delete $db{$key};
+ }
+ } elsif ($tp_name eq 'dma_fence:dma_fence_signaled:') {
+ my $nkey;
+
+ next unless $tp{'driver'} eq 'i915' and
+ $tp{'timeline'} eq 'signaled';
+
+ $nkey = notify_key($tp{'context'}, $tp{'seqno'});
+
+ die if exists $notify{$nkey};
+ $notify{$nkey} = $time unless exists $notify{$nkey};
} elsif ($tp_name eq 'i915:intel_gpu_freq_change:') {
push @freqs, [$prev_freq_ts, $time, $prev_freq] if $prev_freq;
$prev_freq_ts = $time;
@@ -452,15 +524,15 @@ while (<>) {
# find the largest seqno to be used for timeline sorting purposes.
my $max_seqno = 0;
foreach my $key (keys %db) {
- my $gkey = global_key($db{$key}->{'ring'}, $db{$key}->{'global'});
+ my $nkey = notify_key($db{$key}->{'ctx'}, $db{$key}->{'seqno'});
die unless exists $db{$key}->{'start'};
$max_seqno = $db{$key}->{'seqno'} if $db{$key}->{'seqno'} > $max_seqno;
# Notify arrived after context complete?
- $db{$key}->{'notify'} = $notify{$gkey} if not exists $db{$key}->{'notify'}
- and exists $notify{$gkey};
+ $db{$key}->{'notify'} = $notify{$nkey} if not exists $db{$key}->{'notify'}
+ and exists $notify{$nkey};
# No notify but we have end?
$db{$key}->{'notify'} = $db{$key}->{'end'} if exists $db{$key}->{'end'} and
@@ -478,14 +550,13 @@ my $key_count = scalar(keys %db);
my %engine_timelines;
-sub sortEngine {
- my $as = $db{$a}->{'global'};
- my $bs = $db{$b}->{'global'};
+sub sortStart {
+ my $as = $db{$a}->{'start'};
+ my $bs = $db{$b}->{'start'};
my $val;
$val = $as <=> $bs;
-
- die if $val == 0;
+ $val = $a cmp $b if $val == 0;
return $val;
}
@@ -497,9 +568,7 @@ sub get_engine_timeline {
return $engine_timelines{$ring} if exists $engine_timelines{$ring};
@timeline = grep { $db{$_}->{'ring'} eq $ring } keys %db;
- # FIXME seqno restart
- @timeline = sort sortEngine @timeline;
-
+ @timeline = sort sortStart @timeline;
$engine_timelines{$ring} = \@timeline;
return \@timeline;
@@ -561,20 +630,10 @@ foreach my $gid (sort keys %rings) {
$db{$key}->{'no-notify'} = 1;
}
$db{$key}->{'end'} = $end;
+ $db{$key}->{'notify'} = $end if $db{$key}->{'notify'} > $end;
}
}
-sub sortStart {
- my $as = $db{$a}->{'start'};
- my $bs = $db{$b}->{'start'};
- my $val;
-
- $val = $as <=> $bs;
- $val = $a cmp $b if $val == 0;
-
- return $val;
-}
-
my $re_sort = 1;
my @sorted_keys;
@@ -670,9 +729,13 @@ if ($correct_durations) {
next unless exists $db{$key}->{'no-end'};
last if $pos == $#{$timeline};
- # Shift following request to start after the current one
+ # Shift following request to start after the current
+ # one, but only if that wouldn't make it zero duration,
+ # which would indicate notify arrived after context
+ # complete.
$next_key = ${$timeline}[$pos + 1];
- if (exists $db{$key}->{'notify'}) {
+ if (exists $db{$key}->{'notify'} and
+ $db{$key}->{'notify'} < $db{$key}->{'end'}) {
$db{$next_key}->{'engine-start'} = $db{$next_key}->{'start'};
$db{$next_key}->{'start'} = $db{$key}->{'notify'};
$re_sort = 1;
@@ -720,8 +783,10 @@ foreach my $key (@sorted_keys) {
$running{$ring} += $end - $start if $correct_durations or
not exists $db{$key}->{'no-end'};
- $runnable{$ring} += $db{$key}->{'execute-delay'};
- $queued{$ring} += $start - $db{$key}->{'execute-delay'} - $db{$key}->{'queue'};
+ unless (exists $db{$key}->{'virtual'}) {
+ $runnable{$ring} += $db{$key}->{'execute-delay'};
+ $queued{$ring} += $start - $db{$key}->{'execute-delay'} - $db{$key}->{'queue'};
+ }
$batch_count{$ring}++;
@@ -750,9 +815,9 @@ foreach my $gid (sort keys %rings) {
# Extract all GPU busy intervals and sort them.
foreach my $key (@sorted_keys) {
next unless $db{$key}->{'ring'} eq $ring;
+ die if $db{$key}->{'start'} > $db{$key}->{'end'};
push @s_, $db{$key}->{'start'};
push @e_, $db{$key}->{'end'};
- die if $db{$key}->{'start'} > $db{$key}->{'end'};
}
die unless $#s_ == $#e_;
@@ -840,6 +905,12 @@ foreach my $key (keys %reqwait) {
$reqw{$reqwait{$key}->{'ring'}} += $reqwait{$key}->{'end'} - $reqwait{$key}->{'start'};
}
+# Add up all request waits per virtual engine
+my %vreqw;
+foreach my $key (keys %reqwait) {
+ $vreqw{$reqwait{$key}->{'ctx'}} += $reqwait{$key}->{'end'} - $reqwait{$key}->{'start'};
+}
+
say sprintf('GPU: %.2f%% idle, %.2f%% busy',
$flat_busy{'gpu-idle'}, $flat_busy{'gpu-busy'}) unless $html;
@@ -961,18 +1032,24 @@ ENDHTML
sub html_stats
{
my ($stats, $group, $id) = @_;
+ my $veng = exists $stats->{'virtual'} ? 1 : 0;
my $name;
- $name = 'Ring' . $group;
+ $name = $veng ? 'Virtual' : 'Ring';
+ $name .= $group;
$name .= '<br><small><br>';
- $name .= sprintf('%.2f', $stats->{'idle'}) . '% idle<br><br>';
- $name .= sprintf('%.2f', $stats->{'busy'}) . '% busy<br>';
+ unless ($veng) {
+ $name .= sprintf('%.2f', $stats->{'idle'}) . '% idle<br><br>';
+ $name .= sprintf('%.2f', $stats->{'busy'}) . '% busy<br>';
+ }
$name .= sprintf('%.2f', $stats->{'runnable'}) . '% runnable<br>';
$name .= sprintf('%.2f', $stats->{'queued'}) . '% queued<br><br>';
$name .= sprintf('%.2f', $stats->{'wait'}) . '% wait<br><br>';
$name .= $stats->{'count'} . ' batches<br>';
- $name .= sprintf('%.2f', $stats->{'avg'}) . 'us avg batch<br>';
- $name .= sprintf('%.2f', $stats->{'total-avg'}) . 'us avg engine batch<br>';
+ unless ($veng) {
+ $name .= sprintf('%.2f', $stats->{'avg'}) . 'us avg batch<br>';
+ $name .= sprintf('%.2f', $stats->{'total-avg'}) . 'us avg engine batch<br>';
+ }
$name .= '</small>';
print "\t{id: $id, content: '$name'},\n";
@@ -981,17 +1058,24 @@ sub html_stats
sub stdio_stats
{
my ($stats, $group, $id) = @_;
+ my $veng = exists $stats->{'virtual'} ? 1 : 0;
my $str;
- $str = 'Ring' . $group . ': ';
+ $str = $veng ? 'Virtual' : 'Ring';
+ $str .= $group . ': ';
$str .= $stats->{'count'} . ' batches, ';
- $str .= sprintf('%.2f (%.2f) avg batch us, ', $stats->{'avg'}, $stats->{'total-avg'});
- $str .= sprintf('%.2f', $stats->{'idle'}) . '% idle, ';
- $str .= sprintf('%.2f', $stats->{'busy'}) . '% busy, ';
+ unless ($veng) {
+ $str .= sprintf('%.2f (%.2f) avg batch us, ',
+ $stats->{'avg'}, $stats->{'total-avg'});
+ $str .= sprintf('%.2f', $stats->{'idle'}) . '% idle, ';
+ $str .= sprintf('%.2f', $stats->{'busy'}) . '% busy, ';
+ }
+
$str .= sprintf('%.2f', $stats->{'runnable'}) . '% runnable, ';
$str .= sprintf('%.2f', $stats->{'queued'}) . '% queued, ';
$str .= sprintf('%.2f', $stats->{'wait'}) . '% wait';
- if ($avg_delay_stats) {
+
+ if ($avg_delay_stats and not $veng) {
$str .= ', submit/execute/save-avg=(';
$str .= sprintf('%.2f/%.2f/%.2f)', $stats->{'submit'}, $stats->{'execute'}, $stats->{'save'});
}
@@ -1013,8 +1097,16 @@ foreach my $group (sort keys %rings) {
$stats{'idle'} = (1.0 - $flat_busy{$ring} / $elapsed) * 100.0;
$stats{'busy'} = $running{$ring} / $elapsed * 100.0;
- $stats{'runnable'} = $runnable{$ring} / $elapsed * 100.0;
- $stats{'queued'} = $queued{$ring} / $elapsed * 100.0;
+ if (exists $runnable{$ring}) {
+ $stats{'runnable'} = $runnable{$ring} / $elapsed * 100.0;
+ } else {
+ $stats{'runnable'} = 0;
+ }
+ if (exists $queued{$ring}) {
+ $stats{'queued'} = $queued{$ring} / $elapsed * 100.0;
+ } else {
+ $stats{'queued'} = 0;
+ }
$reqw{$ring} = 0 unless exists $reqw{$ring};
$stats{'wait'} = $reqw{$ring} / $elapsed * 100.0;
$stats{'count'} = $batch_count{$ring};
@@ -1031,6 +1123,59 @@ foreach my $group (sort keys %rings) {
}
}
+sub sortVQueue {
+ my $as = $vdb{$a}->{'queue'};
+ my $bs = $vdb{$b}->{'queue'};
+ my $val;
+
+ $val = $as <=> $bs;
+ $val = $a cmp $b if $val == 0;
+
+ return $val;
+}
+
+my @sorted_vkeys = sort sortVQueue keys %vdb;
+my (%vqueued, %vrunnable);
+
+foreach my $key (@sorted_vkeys) {
+ my $ctx = $vdb{$key}->{'ctx'};
+
+ $vdb{$key}->{'submit-delay'} = $vdb{$key}->{'submit'} - $vdb{$key}->{'queue'};
+ $vdb{$key}->{'execute-delay'} = $vdb{$key}->{'start'} - $vdb{$key}->{'submit'};
+
+ $vqueued{$ctx} += $vdb{$key}->{'submit-delay'};
+ $vrunnable{$ctx} += $vdb{$key}->{'execute-delay'};
+}
+
+my $veng_id = $engine_start_id + scalar(keys %rings);
+
+foreach my $cid (sort keys %ctxmap) {
+ my $ctx = $ctxmap{$cid};
+ my $elapsed = $last_ts - $first_ts;
+ my %stats;
+
+ $stats{'virtual'} = 1;
+ if (exists $vrunnable{$ctx}) {
+ $stats{'runnable'} = $vrunnable{$ctx} / $elapsed * 100.0;
+ } else {
+ $stats{'runnable'} = 0;
+ }
+ if (exists $vqueued{$ctx}) {
+ $stats{'queued'} = $vqueued{$ctx} / $elapsed * 100.0;
+ } else {
+ $stats{'queued'} = 0;
+ }
+ $vreqw{$ctx} = 0 unless exists $vreqw{$ctx};
+ $stats{'wait'} = $vreqw{$ctx} / $elapsed * 100.0;
+ $stats{'count'} = scalar(grep {$ctx == $vdb{$_}->{'ctx'}} keys %vdb);
+
+ if ($html) {
+ html_stats(\%stats, $cid, $veng_id++);
+ } else {
+ stdio_stats(\%stats, $cid, $veng_id++);
+ }
+}
+
exit 0 unless $html;
print <<ENDHTML;
@@ -1134,6 +1279,7 @@ sub box_style
}
my $i = 0;
+my $req = 0;
foreach my $key (sort sortQueue keys %db) {
my ($name, $ctx, $seqno) = ($db{$key}->{'name'}, $db{$key}->{'ctx'}, $db{$key}->{'seqno'});
my ($queue, $start, $notify, $end) = ($db{$key}->{'queue'}, $db{$key}->{'start'}, $db{$key}->{'notify'}, $db{$key}->{'end'});
@@ -1147,7 +1293,7 @@ foreach my $key (sort sortQueue keys %db) {
my $skey;
# submit to execute
- unless (exists $skip_box{'queue'}) {
+ unless (exists $skip_box{'queue'} or exists $db{$key}->{'virtual'}) {
$skey = 2 * $max_seqno * $ctx + 2 * $seqno;
$style = box_style($ctx, 'queue');
$content = "$name<br>$db{$key}->{'submit-delay'}us <small>($db{$key}->{'execute-delay'}us)</small>";
@@ -1158,7 +1304,7 @@ foreach my $key (sort sortQueue keys %db) {
# execute to start
$engine_start = $db{$key}->{'start'} unless defined $engine_start;
- unless (exists $skip_box{'ready'}) {
+ unless (exists $skip_box{'ready'} or exists $db{$key}->{'virtual'}) {
$skey = 2 * $max_seqno * $ctx + 2 * $seqno + 1;
$style = box_style($ctx, 'ready');
$content = "<small>$name<br>$db{$key}->{'execute-delay'}us</small>";
@@ -1199,7 +1345,7 @@ foreach my $key (sort sortQueue keys %db) {
$last_ts = $end;
- last if $i > $max_items;
+ last if ++$req > $max_requests;
}
push @freqs, [$prev_freq_ts, $last_ts, $prev_freq] if $prev_freq;
@@ -1232,6 +1378,43 @@ if ($gpu_timeline) {
}
}
+$req = 0;
+$veng_id = $engine_start_id + scalar(keys %rings);
+foreach my $key (@sorted_vkeys) {
+ my ($name, $ctx, $seqno) = ($vdb{$key}->{'name'}, $vdb{$key}->{'ctx'}, $vdb{$key}->{'seqno'});
+ my $queue = $vdb{$key}->{'queue'};
+ my $submit = $vdb{$key}->{'submit'};
+ my $engine_start = $db{$key}->{'engine-start'};
+ my ($content, $style, $startend, $skey);
+ my $group = $veng_id + $cids{$ctx};
+ my $subgroup = $ctx - $min_ctx;
+ my $type = ' type: \'range\',';
+ my $duration;
+
+ # submit to execute
+ unless (exists $skip_box{'queue'}) {
+ $skey = 2 * $max_seqno * $ctx + 2 * $seqno;
+ $style = box_style($ctx, 'queue');
+ $content = "$name<br>$vdb{$key}->{'submit-delay'}us <small>($vdb{$key}->{'execute-delay'}us)</small>";
+ $startend = 'start: ' . $queue . ', end: ' . $submit;
+ print "\t{id: $i, key: $skey, $type group: $group, subgroup: $subgroup, subgroupOrder: $subgroup, content: '$content', $startend, style: \'$style\'},\n";
+ $i++;
+ }
+
+ # execute to start
+ $engine_start = $vdb{$key}->{'start'} unless defined $engine_start;
+ unless (exists $skip_box{'ready'}) {
+ $skey = 2 * $max_seqno * $ctx + 2 * $seqno + 1;
+ $style = box_style($ctx, 'ready');
+ $content = "<small>$name<br>$vdb{$key}->{'execute-delay'}us</small>";
+ $startend = 'start: ' . $submit . ', end: ' . $engine_start;
+ print "\t{id: $i, key: $skey, $type group: $group, subgroup: $subgroup, subgroupOrder: $subgroup, content: '$content', $startend, style: \'$style\'},\n";
+ $i++;
+ }
+
+ last if ++$req > $max_requests;
+}
+
my $end_ts = $first_ts + $width_us;
$first_ts = $first_ts;
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 5097debf..5a428b8a 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -96,6 +96,7 @@ gem_close_race_LDADD = $(LDADD) -lpthread
gem_ctx_thrash_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
gem_ctx_thrash_LDADD = $(LDADD) -lpthread
gem_ctx_sseu_LDADD = $(LDADD) $(top_builddir)/lib/libigt_perf.la
+gem_exec_balancer_LDADD = $(LDADD) $(top_builddir)/lib/libigt_perf.la
gem_exec_capture_LDADD = $(LDADD) -lz
gem_exec_parallel_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
gem_exec_parallel_LDADD = $(LDADD) -lpthread
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 7f921f6c..027ed82f 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -6,6 +6,7 @@ NOUVEAU_TESTS = \
AMDGPU_TESTS = \
amdgpu/amd_basic \
+ amdgpu/amd_color \
amdgpu/amd_cs_nop \
amdgpu/amd_prime \
amdgpu/amd_abm \
@@ -27,6 +28,7 @@ TESTS_progs = \
kms_atomic_interruptible \
kms_atomic_transition \
kms_available_modes_crc \
+ kms_big_fb \
kms_busy \
kms_ccs \
kms_color \
@@ -130,9 +132,15 @@ gem_cs_tlb_SOURCES = i915/gem_cs_tlb.c
TESTS_progs += gem_ctx_bad_destroy
gem_ctx_bad_destroy_SOURCES = i915/gem_ctx_bad_destroy.c
+TESTS_progs += gem_ctx_clone
+gem_ctx_clone_SOURCES = i915/gem_ctx_clone.c
+
TESTS_progs += gem_ctx_create
gem_ctx_create_SOURCES = i915/gem_ctx_create.c
+TESTS_progs += gem_ctx_engines
+gem_ctx_engines_SOURCES = i915/gem_ctx_engines.c
+
TESTS_progs += gem_ctx_exec
gem_ctx_exec_SOURCES = i915/gem_ctx_exec.c
@@ -142,6 +150,9 @@ gem_ctx_isolation_SOURCES = i915/gem_ctx_isolation.c
TESTS_progs += gem_ctx_param
gem_ctx_param_SOURCES = i915/gem_ctx_param.c
+TESTS_progs += gem_ctx_shared
+gem_ctx_shared_SOURCES = i915/gem_ctx_shared.c
+
TESTS_progs += gem_ctx_sseu
gem_ctx_sseu_SOURCES = i915/gem_ctx_sseu.c
@@ -175,6 +186,9 @@ gem_exec_await_SOURCES = i915/gem_exec_await.c
TESTS_progs += gem_exec_bad_domains
gem_exec_bad_domains_SOURCES = i915/gem_exec_bad_domains.c
+TESTS_progs += gem_exec_balancer
+gem_exec_balancer_SOURCES = i915/gem_exec_balancer.c
+
TESTS_progs += gem_exec_basic
gem_exec_basic_SOURCES = i915/gem_exec_basic.c
@@ -505,6 +519,9 @@ i915_selftest_SOURCES = i915/i915_selftest.c
TESTS_progs += i915_suspend
i915_suspend_SOURCES = i915/i915_suspend.c
+TESTS_progs += gem_vm_create
+gem_vm_create_SOURCES = i915/gem_vm_create.c
+
TESTS_progs_X = gem_concurrent_all
gem_concurrent_all_SOURCES = i915/gem_concurrent_all.c
diff --git a/tests/amdgpu/amd_color.c b/tests/amdgpu/amd_color.c
new file mode 100644
index 00000000..0bbee43d
--- /dev/null
+++ b/tests/amdgpu/amd_color.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+
+/* (De)gamma LUT. */
+typedef struct lut {
+ struct drm_color_lut *data;
+ uint32_t size;
+} lut_t;
+
+/* RGB color. */
+typedef struct color {
+ double r;
+ double g;
+ double b;
+} color_t;
+
+/* Common test data. */
+typedef struct data {
+ igt_display_t display;
+ igt_plane_t *primary;
+ igt_output_t *output;
+ igt_pipe_t *pipe;
+ igt_pipe_crc_t *pipe_crc;
+ drmModeModeInfo *mode;
+ enum pipe pipe_id;
+ int fd;
+ int w;
+ int h;
+ uint32_t regamma_lut_size;
+ uint32_t degamma_lut_size;
+} data_t;
+
+static void lut_init(lut_t *lut, uint32_t size)
+{
+ igt_assert(size > 0);
+
+ lut->size = size;
+ lut->data = malloc(size * sizeof(struct drm_color_lut));
+ igt_assert(lut->data);
+}
+
+/* Generates the linear gamma LUT. */
+static void lut_gen_linear(lut_t *lut, uint16_t mask)
+{
+ uint32_t n = lut->size - 1;
+ uint32_t i;
+
+ for (i = 0; i < lut->size; ++i) {
+ uint32_t v = (i * 0xffff / n) & mask;
+
+ lut->data[i].red = v;
+ lut->data[i].blue = v;
+ lut->data[i].green = v;
+ }
+}
+
+/* Generates the sRGB degamma LUT. */
+static void lut_gen_degamma_srgb(lut_t *lut, uint16_t mask)
+{
+ double range = lut->size - 1;
+ uint32_t i;
+
+ for (i = 0; i < lut->size; ++i) {
+ double u, coeff;
+ uint32_t v;
+
+ u = i / range;
+ coeff = u <= 0.040449936 ? (u / 12.92)
+ : (pow((u + 0.055) / 1.055, 2.4));
+ v = (uint32_t)(coeff * 0xffff) & mask;
+
+ lut->data[i].red = v;
+ lut->data[i].blue = v;
+ lut->data[i].green = v;
+ }
+}
+
+/* Generates the sRGB gamma LUT. */
+static void lut_gen_regamma_srgb(lut_t *lut, uint16_t mask)
+{
+ double range = lut->size - 1;
+ uint32_t i;
+
+ for (i = 0; i < lut->size; ++i) {
+ double u, coeff;
+ uint32_t v;
+
+ u = i / range;
+ coeff = u <= 0.00313080 ? (12.92 * u)
+ : (1.055 * pow(u, 1.0 / 2.4) - 0.055);
+ v = (uint32_t)(coeff * 0xffff) & mask;
+
+ lut->data[i].red = v;
+ lut->data[i].blue = v;
+ lut->data[i].green = v;
+ }
+}
+
+static void lut_free(lut_t *lut)
+{
+ if (lut->data) {
+ free(lut->data);
+ lut->data = NULL;
+ }
+
+ lut->size = 0;
+}
+
+/* Fills a FB with the solid color given. */
+static void draw_color(igt_fb_t *fb, double r, double g, double b)
+{
+ cairo_t *cr = igt_get_cairo_ctx(fb->fd, fb);
+
+ cairo_set_operator(cr, CAIRO_OPERATOR_SOURCE);
+ igt_paint_color(cr, 0, 0, fb->width, fb->height, r, g, b);
+ igt_put_cairo_ctx(fb->fd, fb, cr);
+}
+
+/* Generates the gamma test pattern. */
+static void draw_gamma_test(igt_fb_t *fb)
+{
+ cairo_t *cr = igt_get_cairo_ctx(fb->fd, fb);
+ int gh = fb->height / 4;
+
+ igt_paint_color_gradient(cr, 0, 0, fb->width, gh, 1, 1, 1);
+ igt_paint_color_gradient(cr, 0, gh, fb->width, gh, 1, 0, 0);
+ igt_paint_color_gradient(cr, 0, gh * 2, fb->width, gh, 0, 1, 0);
+ igt_paint_color_gradient(cr, 0, gh * 3, fb->width, gh, 0, 0, 1);
+
+ igt_put_cairo_ctx(fb->fd, fb, cr);
+}
+
+/* Sets the degamma LUT. */
+static void set_degamma_lut(data_t *data, lut_t const *lut)
+{
+ size_t size = lut ? sizeof(lut->data[0]) * lut->size : 0;
+ const void *ptr = lut ? lut->data : NULL;
+
+ igt_pipe_obj_replace_prop_blob(data->pipe, IGT_CRTC_DEGAMMA_LUT, ptr,
+ size);
+}
+
+/* Sets the regamma LUT. */
+static void set_regamma_lut(data_t *data, lut_t const *lut)
+{
+ size_t size = lut ? sizeof(lut->data[0]) * lut->size : 0;
+ const void *ptr = lut ? lut->data : NULL;
+
+ igt_pipe_obj_replace_prop_blob(data->pipe, IGT_CRTC_GAMMA_LUT, ptr,
+ size);
+}
+
+/* Common test setup. */
+static void test_init(data_t *data)
+{
+ igt_display_t *display = &data->display;
+
+ /* It doesn't matter which pipe we choose on amdgpu. */
+ data->pipe_id = PIPE_A;
+ data->pipe = &data->display.pipes[data->pipe_id];
+
+ igt_display_reset(display);
+
+ data->output = igt_get_single_output_for_pipe(display, data->pipe_id);
+ igt_require(data->output);
+
+ data->mode = igt_output_get_mode(data->output);
+ igt_assert(data->mode);
+
+ data->primary =
+ igt_pipe_get_plane_type(data->pipe, DRM_PLANE_TYPE_PRIMARY);
+
+ data->pipe_crc = igt_pipe_crc_new(data->fd, data->pipe_id,
+ INTEL_PIPE_CRC_SOURCE_AUTO);
+
+ igt_output_set_pipe(data->output, data->pipe_id);
+
+ data->degamma_lut_size =
+ igt_pipe_obj_get_prop(data->pipe, IGT_CRTC_DEGAMMA_LUT_SIZE);
+ igt_assert_lt(0, data->degamma_lut_size);
+
+ data->regamma_lut_size =
+ igt_pipe_obj_get_prop(data->pipe, IGT_CRTC_GAMMA_LUT_SIZE);
+ igt_assert_lt(0, data->regamma_lut_size);
+
+ data->w = data->mode->hdisplay;
+ data->h = data->mode->vdisplay;
+}
+
+/* Common test cleanup. */
+static void test_fini(data_t *data)
+{
+ igt_pipe_crc_free(data->pipe_crc);
+ igt_display_reset(&data->display);
+}
+
+/*
+ * Older versions of amdgpu would put the pipe into bypass mode for degamma
+ * when passed a linear sRGB matrix but would still use an sRGB regamma
+ * matrix if not passed any. The whole pipe should be in linear bypass mode
+ * when all the matrices are NULL - CRCs for a linear degamma matrix and
+ * a NULL one should match.
+ */
+static void test_crtc_linear_degamma(data_t *data)
+{
+ igt_display_t *display = &data->display;
+ igt_crc_t ref_crc, new_crc;
+ igt_fb_t afb;
+ lut_t lut_linear;
+
+ test_init(data);
+
+ lut_init(&lut_linear, data->degamma_lut_size);
+ lut_gen_linear(&lut_linear, 0xffff);
+
+ igt_create_fb(data->fd, data->w, data->h, DRM_FORMAT_XRGB8888, 0, &afb);
+ draw_gamma_test(&afb);
+
+ /* Draw the reference image. */
+ igt_plane_set_fb(data->primary, &afb);
+ set_regamma_lut(data, NULL);
+ set_degamma_lut(data, NULL);
+ igt_display_commit_atomic(display, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &ref_crc);
+
+ /* Apply a linear degamma. The result should remain the same. */
+ set_degamma_lut(data, &lut_linear);
+ igt_display_commit_atomic(display, 0, NULL);
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &new_crc);
+ igt_assert_crc_equal(&ref_crc, &new_crc);
+
+ test_fini(data);
+ igt_remove_fb(data->fd, &afb);
+ lut_free(&lut_linear);
+}
+
+/*
+ * Older versions of amdgpu would apply the CRTC regamma on top of a custom
+ * sRGB regamma matrix with incorrect calculations or rounding errors.
+ * If we put the pipe into bypass or use the hardware-defined sRGB regamma
+ * on the plane, then we can and should get the correct CRC when passing a
+ * linear regamma matrix to DRM.
+ */
+static void test_crtc_linear_regamma(data_t *data)
+{
+ igt_display_t *display = &data->display;
+ igt_crc_t ref_crc, new_crc;
+ igt_fb_t afb;
+ lut_t lut_linear;
+
+ test_init(data);
+
+ lut_init(&lut_linear, data->regamma_lut_size);
+ lut_gen_linear(&lut_linear, 0xffff);
+
+ igt_create_fb(data->fd, data->w, data->h, DRM_FORMAT_XRGB8888, 0, &afb);
+ draw_gamma_test(&afb);
+
+ /* Draw the reference image. */
+ igt_plane_set_fb(data->primary, &afb);
+ set_regamma_lut(data, NULL);
+ set_degamma_lut(data, NULL);
+ igt_display_commit_atomic(display, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &ref_crc);
+
+ /* Apply a linear regamma. The result should remain the same. */
+ set_regamma_lut(data, &lut_linear);
+ igt_display_commit_atomic(display, 0, NULL);
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &new_crc);
+ igt_assert_crc_equal(&ref_crc, &new_crc);
+
+ test_fini(data);
+ igt_remove_fb(data->fd, &afb);
+ lut_free(&lut_linear);
+}
+
+/*
+ * Tests LUT accuracy. CRTC regamma and CRTC degamma should produce a visually
+ * correct image when used. Hardware limitations on degamma prevent this from
+ * being CRC level accurate across a full test gradient but most values should
+ * still match.
+ *
+ * This test can't pass on DCE because it doesn't support non-linear degamma.
+ */
+static void test_crtc_lut_accuracy(data_t *data)
+{
+ /*
+ * Channels are independent, so we can verify multiple colors at the
+ * same time for improved performance.
+ */
+ static const color_t colors[] = {
+ { 1.00, 1.00, 1.00 },
+ { 0.90, 0.85, 0.75 }, /* 0.95 fails */
+ { 0.70, 0.65, 0.60 },
+ { 0.55, 0.50, 0.45 },
+ { 0.40, 0.35, 0.30 },
+ { 0.25, 0.20, 0.15 },
+ { 0.10, 0.04, 0.02 }, /* 0.05 fails */
+ { 0.00, 0.00, 0.00 },
+ };
+ igt_display_t *display = &data->display;
+ igt_crc_t ref_crc, new_crc;
+ igt_fb_t afb;
+ lut_t lut_degamma, lut_regamma;
+ int i, w, h;
+
+ test_init(data);
+
+ lut_init(&lut_degamma, data->degamma_lut_size);
+ lut_gen_degamma_srgb(&lut_degamma, 0xffff);
+
+ lut_init(&lut_regamma, data->regamma_lut_size);
+ lut_gen_regamma_srgb(&lut_regamma, 0xffff);
+
+ /* Don't draw across the whole screen to improve perf. */
+ w = 64;
+ h = 64;
+
+ igt_create_fb(data->fd, w, h, DRM_FORMAT_XRGB8888, 0, &afb);
+ igt_plane_set_fb(data->primary, &afb);
+ igt_display_commit_atomic(display, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
+
+ /* Test colors. */
+ for (i = 0; i < ARRAY_SIZE(colors); ++i) {
+ color_t col = colors[i];
+
+ igt_info("Testing color (%.2f, %.2f, %.2f) ...\n", col.r, col.g,
+ col.b);
+
+ draw_color(&afb, col.r, col.g, col.b);
+
+ set_regamma_lut(data, NULL);
+ set_degamma_lut(data, NULL);
+ igt_display_commit_atomic(display, 0, NULL);
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &ref_crc);
+
+ set_degamma_lut(data, &lut_degamma);
+ set_regamma_lut(data, &lut_regamma);
+ igt_display_commit_atomic(display, 0, NULL);
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &new_crc);
+
+ igt_assert_crc_equal(&ref_crc, &new_crc);
+ }
+
+ test_fini(data);
+ igt_remove_fb(data->fd, &afb);
+ lut_free(&lut_regamma);
+ lut_free(&lut_degamma);
+}
+
+igt_main
+{
+ data_t data;
+
+ igt_skip_on_simulation();
+
+ memset(&data, 0, sizeof(data));
+
+ igt_fixture
+ {
+ data.fd = drm_open_driver_master(DRIVER_AMDGPU);
+
+ kmstest_set_vt_graphics_mode();
+
+ igt_display_require(&data.display, data.fd);
+ igt_require(data.display.is_atomic);
+ igt_display_require_output(&data.display);
+ }
+
+ igt_subtest("crtc-linear-degamma") test_crtc_linear_degamma(&data);
+ igt_subtest("crtc-linear-regamma") test_crtc_linear_regamma(&data);
+ igt_subtest("crtc-lut-accuracy") test_crtc_lut_accuracy(&data);
+
+ igt_fixture
+ {
+ igt_display_fini(&data.display);
+ }
+}
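The degamma and regamma generators above encode the standard sRGB curve and its inverse, which is why test_crtc_lut_accuracy() expects the CRCs with and without the two LUTs applied to match. A small standalone sanity check of that round-trip property, reusing the same constants as the patch (illustrative only, not part of amd_color.c):

#include <assert.h>
#include <math.h>

/* Same piecewise curves as lut_gen_degamma_srgb()/lut_gen_regamma_srgb(). */
static double srgb_decode(double u)
{
	return u <= 0.040449936 ? u / 12.92 : pow((u + 0.055) / 1.055, 2.4);
}

static double srgb_encode(double u)
{
	return u <= 0.00313080 ? 12.92 * u : 1.055 * pow(u, 1.0 / 2.4) - 0.055;
}

int main(void)
{
	double u;

	/* Away from the breakpoint the two branches are exact inverses,
	 * so decode followed by encode must return the input. */
	for (u = 0.0; u <= 1.0; u += 0.05)
		assert(fabs(srgb_encode(srgb_decode(u)) - u) < 1e-6);

	return 0;
}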
diff --git a/tests/amdgpu/meson.build b/tests/amdgpu/meson.build
index 1afece86..42086374 100644
--- a/tests/amdgpu/meson.build
+++ b/tests/amdgpu/meson.build
@@ -4,6 +4,7 @@ amdgpu_deps = test_deps
if libdrm_amdgpu.found()
amdgpu_progs += [ 'amd_abm',
'amd_basic',
+ 'amd_color',
'amd_cs_nop',
'amd_prime',
]
diff --git a/tests/core_auth.c b/tests/core_auth.c
index 1bdc2261..9c240fdb 100644
--- a/tests/core_auth.c
+++ b/tests/core_auth.c
@@ -1,6 +1,5 @@
/*
* Copyright 2015 David Herrmann <dh.herrmann@gmail.com>
- * Copyright 2018 Collabora, Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -43,12 +42,9 @@
#include <sys/time.h>
#include <sys/poll.h>
#include <sys/resource.h>
-#include <sys/sysmacros.h>
#include "drm.h"
-#ifdef __linux__
-# include <sys/syscall.h>
-#else
+#ifndef __linux__
# include <pthread.h>
#endif
@@ -62,7 +58,7 @@ is_local_tid(pid_t tid)
#else
/* On Linux systems, drmGetClient() would return the thread ID
instead of the actual process ID */
- return syscall(SYS_gettid) == tid;
+ return gettid() == tid;
#endif
}
@@ -194,105 +190,6 @@ static void test_basic_auth(int master)
close(slave);
}
-static bool has_prime_import(int fd)
-{
- uint64_t value;
-
- if (drmGetCap(fd, DRM_CAP_PRIME, &value))
- return false;
-
- return value & DRM_PRIME_CAP_IMPORT;
-}
-
-static void check_auth_sanity(int master)
-{
- uint32_t handle;
-
- igt_assert(check_auth(master) == true);
- igt_require(has_prime_import(master));
-
- igt_assert(drmPrimeFDToHandle(master, -1, &handle) < 0);
-
- /* IOCTL requires authenticated master as done in drm_permit.
- * As we get past that, we'll fail due to the invalid FD.
- *
- * Note: strictly speaking this is unrelated to the goal of
- * the test, although danvet requested it.
- */
- igt_assert(errno == EBADF);
-}
-
-static bool has_render_node(int fd)
-{
- char node_name[80];
- struct stat sbuf;
-
- if (fstat(fd, &sbuf))
- return false;
-
- sprintf(node_name, "/dev/dri/renderD%d", minor(sbuf.st_rdev) | 0x80);
- if (stat(node_name, &sbuf))
- return false;
-
- return true;
-}
-
-/*
- * Testcase: Render capable, unauthenticated master doesn't throw -EACCES for
- * DRM_RENDER_ALLOW ioctls.
- */
-static void test_unauth_vs_render(int master)
-{
- int slave;
- uint32_t handle;
- struct stat statbuf;
- bool has_render;
-
- /* need to check for render nodes before we wreak the filesystem */
- has_render = has_render_node(master);
-
- /* create a card node matching master which (only) we can access as
- * non-root */
- do_or_die(fstat(master, &statbuf));
- do_or_die(unshare(CLONE_NEWNS));
- do_or_die(mount(NULL, "/", NULL, MS_PRIVATE | MS_REC, NULL));
- do_or_die(mount("none", "/dev/dri", "tmpfs", 0, NULL));
- umask(0);
- do_or_die(mknod("/dev/dri/card", S_IFCHR | 0666, statbuf.st_rdev));
-
- igt_drop_root();
-
- slave = open("/dev/dri/card", O_RDWR);
-
- igt_assert(slave >= 0);
-
- /*
- * The second open() happens without CAP_SYS_ADMIN, thus it will NOT
- * be authenticated.
- */
- igt_assert(check_auth(slave) == false);
-
- /* Issuing the following ioctl will fail, no doubt about it. */
- igt_assert(drmPrimeFDToHandle(slave, -1, &handle) < 0);
-
- /*
- * Updated kernels allow render capable, unauthenticated master to
- * issue DRM_AUTH ioctls (like FD2HANDLE above), as long as they are
- * annotated as DRM_RENDER_ALLOW.
- *
- * Otherwise, errno is set to -EACCES
- *
- * Note: We are _not_ interested in the FD2HANDLE specific errno,
- * yet the EBADF check is added on the explicit request by danvet.
- */
- if (has_render)
- igt_assert(errno == EBADF);
- else
- igt_assert(errno == EACCES);
-
- close(slave);
-}
-
igt_main
{
int master;
@@ -331,17 +228,4 @@ igt_main
igt_subtest("many-magics")
test_many_magics(master);
}
-
- igt_subtest_group {
- igt_fixture
- master = drm_open_driver(DRIVER_ANY);
-
- igt_subtest("unauth-vs-render") {
- check_auth_sanity(master);
-
- igt_fork(child, 1)
- test_unauth_vs_render(master);
- igt_waitchildren();
- }
- }
}
diff --git a/tests/drm_import_export.c b/tests/drm_import_export.c
index 4bc7b7d4..f79c09db 100644
--- a/tests/drm_import_export.c
+++ b/tests/drm_import_export.c
@@ -120,7 +120,7 @@ static void start_test(void)
static void * test_thread(void * par)
{
#ifdef __linux__
- igt_debug("start %ld\n", syscall(SYS_gettid));
+ igt_debug("start %ld\n", (long) gettid());
#else
igt_debug("start %ld\n", (long) pthread_self());
#endif
diff --git a/tests/i915/gem_busy.c b/tests/i915/gem_busy.c
index c120faf1..a5535870 100644
--- a/tests/i915/gem_busy.c
+++ b/tests/i915/gem_busy.c
@@ -66,22 +66,9 @@ static void __gem_busy(int fd,
*read = busy.busy >> 16;
}
-static uint32_t ring_to_class(unsigned int ring)
-{
- uint32_t class[] = {
- [I915_EXEC_DEFAULT] = I915_ENGINE_CLASS_RENDER,
- [I915_EXEC_RENDER] = I915_ENGINE_CLASS_RENDER,
- [I915_EXEC_BLT] = I915_ENGINE_CLASS_COPY,
- [I915_EXEC_BSD] = I915_ENGINE_CLASS_VIDEO,
- [I915_EXEC_VEBOX] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
- };
- igt_assert(ring < ARRAY_SIZE(class));
- return class[ring];
-}
-
static bool exec_noop(int fd,
uint32_t *handles,
- unsigned ring,
+ unsigned flags,
bool write)
{
struct drm_i915_gem_execbuffer2 execbuf;
@@ -97,9 +84,9 @@ static bool exec_noop(int fd,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(exec);
execbuf.buffer_count = 3;
- execbuf.flags = ring;
- igt_debug("Queuing handle for %s on ring %d\n",
- write ? "writing" : "reading", ring & 0x7);
+ execbuf.flags = flags;
+ igt_debug("Queuing handle for %s on engine %d\n",
+ write ? "writing" : "reading", flags);
return __gem_execbuf(fd, &execbuf) == 0;
}
@@ -110,18 +97,17 @@ static bool still_busy(int fd, uint32_t handle)
return write;
}
-static void semaphore(int fd, unsigned ring, uint32_t flags)
+static void semaphore(int fd, const struct intel_execution_engine2 *e)
{
+ struct intel_execution_engine2 *__e;
uint32_t bbe = MI_BATCH_BUFFER_END;
- const unsigned uabi = ring_to_class(ring & 63);
+ const unsigned uabi = e->class;
igt_spin_t *spin;
uint32_t handle[3];
uint32_t read, write;
uint32_t active;
unsigned i;
- gem_require_ring(fd, ring | flags);
-
handle[TEST] = gem_create(fd, 4096);
handle[BATCH] = gem_create(fd, 4096);
gem_write(fd, handle[BATCH], 0, &bbe, sizeof(bbe));
@@ -129,18 +115,18 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
/* Create a long running batch which we can use to hog the GPU */
handle[BUSY] = gem_create(fd, 4096);
spin = igt_spin_new(fd,
- .engine = ring,
+ .engine = e->flags,
.dependency = handle[BUSY]);
/* Queue a batch after the busy, it should block and remain "busy" */
- igt_assert(exec_noop(fd, handle, ring | flags, false));
+ igt_assert(exec_noop(fd, handle, e->flags, false));
igt_assert(still_busy(fd, handle[BUSY]));
__gem_busy(fd, handle[TEST], &read, &write);
igt_assert_eq(read, 1 << uabi);
igt_assert_eq(write, 0);
/* Requeue with a write */
- igt_assert(exec_noop(fd, handle, ring | flags, true));
+ igt_assert(exec_noop(fd, handle, e->flags, true));
igt_assert(still_busy(fd, handle[BUSY]));
__gem_busy(fd, handle[TEST], &read, &write);
igt_assert_eq(read, 1 << uabi);
@@ -148,9 +134,9 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
/* Now queue it for a read across all available rings */
active = 0;
- for (i = I915_EXEC_RENDER; i <= I915_EXEC_VEBOX; i++) {
- if (exec_noop(fd, handle, i | flags, false))
- active |= 1 << ring_to_class(i);
+ __for_each_physical_engine(fd, __e) {
+ if (exec_noop(fd, handle, __e->flags, false))
+ active |= 1 << __e->class;
}
igt_assert(still_busy(fd, handle[BUSY]));
__gem_busy(fd, handle[TEST], &read, &write);
@@ -173,7 +159,7 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
#define PARALLEL 1
#define HANG 2
-static void one(int fd, unsigned ring, unsigned test_flags)
+static void one(int fd, const struct intel_execution_engine2 *e, unsigned test_flags)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[2];
@@ -182,7 +168,7 @@ static void one(int fd, unsigned ring, unsigned test_flags)
struct drm_i915_gem_relocation_entry store[1024+1];
struct drm_i915_gem_execbuffer2 execbuf;
unsigned size = ALIGN(ARRAY_SIZE(store)*16 + 4, 4096);
- const unsigned uabi = ring_to_class(ring & 63);
+ const unsigned uabi = e->class;
uint32_t read[2], write[2];
struct timespec tv;
uint32_t *batch, *bbe;
@@ -191,7 +177,7 @@ static void one(int fd, unsigned ring, unsigned test_flags)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -263,17 +249,18 @@ static void one(int fd, unsigned ring, unsigned test_flags)
__gem_busy(fd, obj[BATCH].handle, &read[BATCH], &write[BATCH]);
if (test_flags & PARALLEL) {
- unsigned other;
+ struct intel_execution_engine2 *e2;
- for_each_physical_engine(fd, other) {
- if (other == ring)
+ __for_each_physical_engine(fd, e2) {
+ if (e2->class == e->class &&
+ e2->instance == e->instance)
continue;
- if (!gem_can_store_dword(fd, other))
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- igt_debug("Testing %s in parallel\n", e__->name);
- one(fd, other, 0);
+ igt_debug("Testing %s in parallel\n", e2->name);
+ one(fd, e2, 0);
}
}
@@ -430,7 +417,7 @@ static bool has_semaphores(int fd)
static bool has_extended_busy_ioctl(int fd)
{
- igt_spin_t *spin = igt_spin_new(fd, .engine = I915_EXEC_RENDER);
+ igt_spin_t *spin = igt_spin_new(fd, .engine = I915_EXEC_DEFAULT);
uint32_t read, write;
__gem_busy(fd, spin->handle, &read, &write);
@@ -439,11 +426,11 @@ static bool has_extended_busy_ioctl(int fd)
return read != 0;
}
-static void basic(int fd, unsigned ring, unsigned flags)
+static void basic(int fd, const struct intel_execution_engine2 *e, unsigned flags)
{
igt_spin_t *spin =
igt_spin_new(fd,
- .engine = ring,
+ .engine = e->flags,
.flags = IGT_SPIN_NO_PREEMPTION);
struct timespec tv;
int timeout;
@@ -473,15 +460,24 @@ static void basic(int fd, unsigned ring, unsigned flags)
igt_spin_free(fd, spin);
}
+static void all(int i915)
+{
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e)
+ basic(i915, e, 0);
+}
+
igt_main
{
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
int fd = -1;
igt_fixture {
fd = drm_open_driver_master(DRIVER_INTEL);
igt_require_gem(fd);
- igt_require(gem_can_store_dword(fd, 0));
+ igt_require(gem_class_can_store_dword(fd,
+ I915_ENGINE_CLASS_RENDER));
}
igt_subtest_group {
@@ -489,14 +485,16 @@ igt_main
igt_fork_hang_detector(fd);
}
- for (e = intel_execution_engines; e->name; e++) {
+ igt_subtest("busy-all") {
+ gem_quiescent_gpu(fd);
+ all(fd);
+ }
+
+ __for_each_physical_engine(fd, e) {
igt_subtest_group {
- igt_subtest_f("%sbusy-%s",
- e->exec_id == 0 ? "basic-" : "",
- e->name) {
- igt_require(gem_has_ring(fd, e->exec_id | e->flags));
+ igt_subtest_f("busy-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, 0);
+ basic(fd, e, 0);
}
}
}
@@ -507,31 +505,22 @@ igt_main
gem_require_mmap_wc(fd);
}
- for (e = intel_execution_engines; e->name; e++) {
- /* default exec-id is purely symbolic */
- if (e->exec_id == 0)
- continue;
-
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("extended-%s", e->name) {
- igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
- igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
+ igt_require(gem_class_can_store_dword(fd,
+ e->class));
gem_quiescent_gpu(fd);
- one(fd, e->exec_id | e->flags, 0);
+ one(fd, e, 0);
gem_quiescent_gpu(fd);
}
}
- for (e = intel_execution_engines; e->name; e++) {
- /* default exec-id is purely symbolic */
- if (e->exec_id == 0)
- continue;
-
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("extended-parallel-%s", e->name) {
- igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
- igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
+ igt_require(gem_class_can_store_dword(fd, e->class));
gem_quiescent_gpu(fd);
- one(fd, e->exec_id | e->flags, PARALLEL);
+ one(fd, e, PARALLEL);
gem_quiescent_gpu(fd);
}
}
@@ -543,13 +532,9 @@ igt_main
igt_require(has_semaphores(fd));
}
- for (e = intel_execution_engines; e->name; e++) {
- /* default exec-id is purely symbolic */
- if (e->exec_id == 0)
- continue;
-
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("extended-semaphore-%s", e->name)
- semaphore(fd, e->exec_id, e->flags);
+ semaphore(fd, e);
}
}
@@ -568,14 +553,13 @@ igt_main
hang = igt_allow_hang(fd, 0, 0);
}
- for (e = intel_execution_engines; e->name; e++) {
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("%shang-%s",
- e->exec_id == 0 ? "basic-" : "",
- e->name) {
+ e->class == I915_ENGINE_CLASS_RENDER
+ ? "basic-" : "", e->name) {
igt_skip_on_simulation();
- igt_require(gem_has_ring(fd, e->exec_id | e->flags));
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, HANG);
+ basic(fd, e, HANG);
}
}
@@ -585,18 +569,13 @@ igt_main
gem_require_mmap_wc(fd);
}
- for (e = intel_execution_engines; e->name; e++) {
- /* default exec-id is purely symbolic */
- if (e->exec_id == 0)
- continue;
-
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("extended-hang-%s", e->name) {
igt_skip_on_simulation();
- igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
- igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
+ igt_require(gem_class_can_store_dword(fd, e->class));
gem_quiescent_gpu(fd);
- one(fd, e->exec_id | e->flags, HANG);
+ one(fd, e, HANG);
gem_quiescent_gpu(fd);
}
}
diff --git a/tests/i915/gem_close_race.c b/tests/i915/gem_close_race.c
index 11d626dc..57e00480 100644
--- a/tests/i915/gem_close_race.c
+++ b/tests/i915/gem_close_race.c
@@ -51,7 +51,6 @@
static uint32_t devid;
static bool has_64bit_relocations;
-#define gettid() syscall(__NR_gettid)
#define sigev_notify_thread_id _sigev_un._tid
static void selfcopy(int fd, uint32_t handle, int loops)
diff --git a/tests/i915/gem_cs_tlb.c b/tests/i915/gem_cs_tlb.c
index 51e1c4e1..13de5499 100644
--- a/tests/i915/gem_cs_tlb.c
+++ b/tests/i915/gem_cs_tlb.c
@@ -140,7 +140,7 @@ static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
igt_main
{
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
int fd = -1;
igt_skip_on_simulation();
@@ -150,9 +150,9 @@ igt_main
igt_require_gem(fd);
}
- for (e = intel_execution_engines; e->name; e++)
- igt_subtest_f("%s%s", e->exec_id ? "" : "basic-", e->name)
- run_on_ring(fd, e->exec_id | e->flags, e->name);
+ __for_each_physical_engine(fd, e)
+ igt_subtest_f("%s", e->name)
+ run_on_ring(fd, e->flags, e->name);
igt_fixture
close(fd);
diff --git a/tests/i915/gem_ctx_clone.c b/tests/i915/gem_ctx_clone.c
new file mode 100644
index 00000000..896b24dc
--- /dev/null
+++ b/tests/i915/gem_ctx_clone.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "igt_gt.h"
+#include "i915/gem_vm.h"
+#include "i915_drm.h"
+
+static int ctx_create_ioctl(int i915, struct drm_i915_gem_context_create_ext *arg)
+{
+ int err;
+
+ err = 0;
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, arg)) {
+ err = -errno;
+ igt_assume(err);
+ }
+
+ errno = 0;
+ return err;
+}
+
+static bool has_ctx_clone(int i915)
+{
+ struct drm_i915_gem_context_create_ext_clone ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .clone_id = -1,
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&ext),
+ };
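+ /*
+ * clone_id = -1 can never resolve to a context, so a kernel that
+ * parses the CLONE extension fails the lookup with -ENOENT; any
+ * other error means the extension itself is unsupported.
+ */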
+ return ctx_create_ioctl(i915, &create) == -ENOENT;
+}
+
+static void invalid_clone(int i915)
+{
+ struct drm_i915_gem_context_create_ext_clone ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&ext),
+ };
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ gem_context_destroy(i915, create.ctx_id);
+
+ ext.flags = -1; /* Hopefully we won't run out of flags */
+ igt_assert_eq(ctx_create_ioctl(i915, &create), -EINVAL);
+ ext.flags = 0;
+
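+ /*
+ * An unreadable next_extension pointer must fault, and a chain that
+ * loops back on itself must be caught as too long (-E2BIG).
+ */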
+ ext.base.next_extension = -1;
+ igt_assert_eq(ctx_create_ioctl(i915, &create), -EFAULT);
+ ext.base.next_extension = to_user_pointer(&ext);
+ igt_assert_eq(ctx_create_ioctl(i915, &create), -E2BIG);
+ ext.base.next_extension = 0;
+
+ ext.clone_id = -1;
+ igt_assert_eq(ctx_create_ioctl(i915, &create), -ENOENT);
+ ext.clone_id = 0;
+}
+
+static void clone_flags(int i915)
+{
+ struct drm_i915_gem_context_create_ext_setparam set = {
+ { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
+ { .param = I915_CONTEXT_PARAM_RECOVERABLE },
+ };
+ struct drm_i915_gem_context_create_ext_clone ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .flags = I915_CONTEXT_CLONE_FLAGS,
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&ext),
+ };
+ int expected;
+
+ set.param.value = 1; /* default is recoverable */
+ igt_require(__gem_context_set_param(i915, &set.param) == 0);
+
+ for (int pass = 0; pass < 2; pass++) { /* cloning default, then child */
+ igt_debug("Cloning %d\n", ext.clone_id);
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+
+ set.param.ctx_id = ext.clone_id;
+ gem_context_get_param(i915, &set.param);
+ expected = set.param.value;
+
+ set.param.ctx_id = create.ctx_id;
+ gem_context_get_param(i915, &set.param);
+
+ igt_assert_eq_u64(set.param.param,
+ I915_CONTEXT_PARAM_RECOVERABLE);
+ igt_assert_eq((int)set.param.value, expected);
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ expected = set.param.value = 0;
+ set.param.ctx_id = ext.clone_id;
+ gem_context_set_param(i915, &set.param);
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+
+ set.param.ctx_id = create.ctx_id;
+ gem_context_get_param(i915, &set.param);
+
+ igt_assert_eq_u64(set.param.param,
+ I915_CONTEXT_PARAM_RECOVERABLE);
+ igt_assert_eq((int)set.param.value, expected);
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ /* clone, but then reset the flag to its default (recoverable)... */
+ set.param.ctx_id = 0;
+ set.param.value = 1;
+ ext.base.next_extension = to_user_pointer(&set);
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ ext.base.next_extension = 0;
+
+ /* the new context should then carry the reset (default) value... */
+ set.param.ctx_id = create.ctx_id;
+ gem_context_get_param(i915, &set.param);
+ igt_assert_eq_u64(set.param.value, 1);
+
+ /* while the original context keeps its non-default value */
+ set.param.ctx_id = ext.clone_id;
+ gem_context_get_param(i915, &set.param);
+ igt_assert_eq_u64(set.param.value, 0);
+
+ gem_context_destroy(i915, create.ctx_id);
+ ext.clone_id = gem_context_create(i915);
+ }
+
+ gem_context_destroy(i915, ext.clone_id);
+}
+
+static void clone_engines(int i915)
+{
+ struct drm_i915_gem_context_create_ext_setparam set = {
+ { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
+ { .param = I915_CONTEXT_PARAM_ENGINES },
+ };
+ struct drm_i915_gem_context_create_ext_clone ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .flags = I915_CONTEXT_CLONE_ENGINES,
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&ext),
+ };
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(expected, 64);
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 64);
+ uint64_t ex_size;
+
+ memset(&expected, 0, sizeof(expected));
+ memset(&engines, 0, sizeof(engines));
+
+ igt_require(__gem_context_set_param(i915, &set.param) == 0);
+
+ for (int pass = 0; pass < 2; pass++) { /* cloning default, then child */
+ igt_debug("Cloning %d\n", ext.clone_id);
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+
+ /* Check that we cloned the engine map */
+ set.param.ctx_id = ext.clone_id;
+ set.param.size = sizeof(expected);
+ set.param.value = to_user_pointer(&expected);
+ gem_context_get_param(i915, &set.param);
+ ex_size = set.param.size;
+
+ set.param.ctx_id = create.ctx_id;
+ set.param.size = sizeof(engines);
+ set.param.value = to_user_pointer(&engines);
+ gem_context_get_param(i915, &set.param);
+
+ igt_assert_eq_u64(set.param.param, I915_CONTEXT_PARAM_ENGINES);
+ igt_assert_eq_u64(set.param.size, ex_size);
+ igt_assert(!memcmp(&engines, &expected, ex_size));
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ /* Check that the clone will replace an earlier set */
+ expected.engines[0].engine_class =
+ I915_ENGINE_CLASS_INVALID;
+ expected.engines[0].engine_instance =
+ I915_ENGINE_CLASS_INVALID_NONE;
+ ex_size = (sizeof(struct i915_context_param_engines) +
+ sizeof(expected.engines[0]));
+
+ set.param.ctx_id = ext.clone_id;
+ set.param.size = ex_size;
+ set.param.value = to_user_pointer(&expected);
+ gem_context_set_param(i915, &set.param);
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+
+ set.param.ctx_id = create.ctx_id;
+ set.param.size = sizeof(engines);
+ set.param.value = to_user_pointer(&engines);
+ gem_context_get_param(i915, &set.param);
+
+ igt_assert_eq_u64(set.param.size, ex_size);
+ igt_assert(!memcmp(&engines, &expected, ex_size));
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ /* clone but then reset engines to default */
+ set.param.ctx_id = 0;
+ set.param.size = 0;
+ set.param.value = 0;
+ ext.base.next_extension = to_user_pointer(&set);
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ ext.base.next_extension = 0;
+
+ set.param.ctx_id = create.ctx_id;
+ set.param.size = sizeof(engines);
+ set.param.value = to_user_pointer(&engines);
+ gem_context_get_param(i915, &set.param);
+ igt_assert_eq_u64(set.param.size, 0);
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ /* And check we ignore the flag */
+ ext.flags = 0;
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ ext.flags = I915_CONTEXT_CLONE_ENGINES;
+
+ set.param.ctx_id = create.ctx_id;
+ set.param.size = sizeof(engines);
+ set.param.value = to_user_pointer(&engines);
+ gem_context_get_param(i915, &set.param);
+ igt_assert_eq_u64(set.param.size, 0);
+
+ ext.clone_id = gem_context_create(i915);
+ }
+
+ gem_context_destroy(i915, ext.clone_id);
+}
+
+static void clone_scheduler(int i915)
+{
+ struct drm_i915_gem_context_create_ext_setparam set = {
+ { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
+ { .param = I915_CONTEXT_PARAM_PRIORITY },
+ };
+ struct drm_i915_gem_context_create_ext_clone ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .flags = I915_CONTEXT_CLONE_SCHEDATTR,
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&ext),
+ };
+ int expected;
+
+ igt_require(__gem_context_set_param(i915, &set.param) == 0);
+
+ for (int pass = 0; pass < 2; pass++) { /* cloning default, then child */
+ igt_debug("Cloning %d\n", ext.clone_id);
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+
+ set.param.ctx_id = ext.clone_id;
+ gem_context_get_param(i915, &set.param);
+ expected = set.param.value;
+
+ set.param.ctx_id = create.ctx_id;
+ gem_context_get_param(i915, &set.param);
+
+ igt_assert_eq_u64(set.param.param, I915_CONTEXT_PARAM_PRIORITY);
+ igt_assert_eq((int)set.param.value, expected);
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ expected = set.param.value = 1;
+ set.param.ctx_id = ext.clone_id;
+ gem_context_set_param(i915, &set.param);
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+
+ set.param.ctx_id = create.ctx_id;
+ gem_context_get_param(i915, &set.param);
+
+ igt_assert_eq_u64(set.param.param, I915_CONTEXT_PARAM_PRIORITY);
+ igt_assert_eq((int)set.param.value, expected);
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ /* clone but then reset priority to default */
+ set.param.ctx_id = 0;
+ set.param.value = 0;
+ ext.base.next_extension = to_user_pointer(&set);
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ ext.base.next_extension = 0;
+
+ set.param.ctx_id = create.ctx_id;
+ gem_context_get_param(i915, &set.param);
+ igt_assert_eq_u64(set.param.value, 0);
+
+ set.param.ctx_id = ext.clone_id;
+ gem_context_get_param(i915, &set.param);
+ igt_assert_eq_u64(set.param.value, 1);
+
+ gem_context_destroy(i915, create.ctx_id);
+ ext.clone_id = gem_context_create(i915);
+ }
+
+ gem_context_destroy(i915, ext.clone_id);
+}
+
+static uint32_t __batch_create(int i915, uint32_t offset)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ uint32_t handle;
+
+ handle = gem_create(i915, ALIGN(offset + 4, 4096));
+ gem_write(i915, handle, offset, &bbe, sizeof(bbe));
+
+ return handle;
+}
+
+static uint32_t batch_create(int i915)
+{
+ return __batch_create(i915, 0);
+}
+
+static void check_same_vm(int i915, uint32_t ctx_a, uint32_t ctx_b)
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ };
+
+ /* First verify that we try to use "softpinning" by default */
+ batch.offset = 48 << 20;
+ eb.rsvd1 = ctx_a;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 48 << 20);
+
+ /* An already active VMA will try to keep its offset */
+ batch.offset = 0;
+ eb.rsvd1 = ctx_b;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 48 << 20);
+
+ gem_sync(i915, batch.handle);
+ gem_close(i915, batch.handle);
+
+ gem_quiescent_gpu(i915); /* evict the vma */
+}
+
+static void clone_vm(int i915)
+{
+ struct drm_i915_gem_context_param set = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ struct drm_i915_gem_context_create_ext_clone ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .flags = I915_CONTEXT_CLONE_VM,
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&ext),
+ };
+ uint32_t vm_id[2];
+
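+ /* vm id 0 never exists, so -ENOENT (not -EINVAL) proves PARAM_VM support */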
+ igt_require(__gem_context_set_param(i915, &set) == -ENOENT);
+
+ /* Scrub the VM for our tests */
+ i915 = gem_reopen_driver(i915);
+
+ set.ctx_id = gem_context_create(i915);
+ gem_context_get_param(i915, &set);
+ vm_id[0] = set.value;
+ gem_context_destroy(i915, set.ctx_id);
+
+ vm_id[1] = gem_vm_create(i915);
+
+ for (int pass = 0; pass < 2; pass++) { /* cloning default, then child */
+ igt_debug("Cloning %d\n", ext.clone_id);
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ check_same_vm(i915, ext.clone_id, create.ctx_id);
+ gem_context_destroy(i915, create.ctx_id);
+
+ set.value = vm_id[pass];
+ set.ctx_id = ext.clone_id;
+ gem_context_set_param(i915, &set);
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ check_same_vm(i915, ext.clone_id, create.ctx_id);
+ gem_context_destroy(i915, create.ctx_id);
+
+ ext.clone_id = gem_context_create(i915);
+ }
+
+ gem_context_destroy(i915, ext.clone_id);
+
+ for (int i = 0; i < ARRAY_SIZE(vm_id); i++)
+ gem_vm_destroy(i915, vm_id[i]);
+
+ close(i915);
+}
+
+igt_main
+{
+ int i915 = -1;
+
+ igt_fixture {
+ i915 = drm_open_driver(DRIVER_INTEL);
+ igt_require_gem(i915);
+ gem_require_contexts(i915);
+
+ igt_require(has_ctx_clone(i915));
+ igt_fork_hang_detector(i915);
+ }
+
+ igt_subtest("invalid")
+ invalid_clone(i915);
+
+ igt_subtest("engines")
+ clone_engines(i915);
+
+ igt_subtest("flags")
+ clone_flags(i915);
+
+ igt_subtest("scheduler")
+ clone_scheduler(i915);
+
+ igt_subtest("vm")
+ clone_vm(i915);
+
+ igt_fixture {
+ igt_stop_hang_detector();
+ close(i915);
+ }
+}
diff --git a/tests/i915/gem_ctx_create.c b/tests/i915/gem_ctx_create.c
index a664070d..1e2c40c4 100644
--- a/tests/i915/gem_ctx_create.c
+++ b/tests/i915/gem_ctx_create.c
@@ -33,6 +33,7 @@
#include <time.h>
#include "igt_rand.h"
+#include "sw_sync.h"
#define LOCAL_I915_EXEC_BSD_SHIFT (13)
#define LOCAL_I915_EXEC_BSD_MASK (3 << LOCAL_I915_EXEC_BSD_SHIFT)
@@ -45,12 +46,33 @@ static unsigned all_nengine;
static unsigned ppgtt_engines[16];
static unsigned ppgtt_nengine;
-static int __gem_context_create_local(int fd, struct drm_i915_gem_context_create *arg)
+static int create_ioctl(int fd, struct drm_i915_gem_context_create *arg)
{
- int ret = 0;
- if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, arg))
- ret = -errno;
- return ret;
+ int err;
+
+ err = 0;
+ if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, arg)) {
+ err = -errno;
+ igt_assume(err);
+ }
+
+ errno = 0;
+ return err;
+}
+
+static int create_ext_ioctl(int i915,
+ struct drm_i915_gem_context_create_ext *arg)
+{
+ int err;
+
+ err = 0;
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, arg)) {
+ err = -errno;
+ igt_assume(err);
+ }
+
+ errno = 0;
+ return err;
}
static double elapsed(const struct timespec *start,
@@ -308,6 +330,196 @@ static void maximum(int fd, int ncpus, unsigned mode)
free(contexts);
}
+static void basic_ext_param(int i915)
+{
+ struct drm_i915_gem_context_create_ext_setparam ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS
+ };
+ struct drm_i915_gem_context_param get;
+
+ igt_require(create_ext_ioctl(i915, &create) == 0);
+ gem_context_destroy(i915, create.ctx_id);
+
+ create.extensions = -1ull;
+ igt_assert_eq(create_ext_ioctl(i915, &create), -EFAULT);
+
+ create.extensions = to_user_pointer(&ext);
+ igt_assert_eq(create_ext_ioctl(i915, &create), -EINVAL);
+
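+ /* PRIORITY returns -ENODEV without scheduler support; skip the rest then */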
+ ext.param.param = I915_CONTEXT_PARAM_PRIORITY;
+ if (create_ext_ioctl(i915, &create) != -ENODEV) {
+ gem_context_destroy(i915, create.ctx_id);
+
+ ext.base.next_extension = -1ull;
+ igt_assert_eq(create_ext_ioctl(i915, &create), -EFAULT);
+ ext.base.next_extension = to_user_pointer(&ext);
+ igt_assert_eq(create_ext_ioctl(i915, &create), -E2BIG);
+ ext.base.next_extension = 0;
+
+ ext.param.value = 32;
+ igt_assert_eq(create_ext_ioctl(i915, &create), 0);
+
+ memset(&get, 0, sizeof(get));
+ get.ctx_id = create.ctx_id;
+ get.param = I915_CONTEXT_PARAM_PRIORITY;
+ gem_context_get_param(i915, &get);
+ igt_assert_eq(get.value, ext.param.value);
+
+ gem_context_destroy(i915, create.ctx_id);
+
+ /* Having demonstrated a valid setup, check a few invalids */
+ ext.param.ctx_id = 1;
+ igt_assert_eq(create_ext_ioctl(i915, &create), -EINVAL);
+ ext.param.ctx_id = create.ctx_id;
+ igt_assert_eq(create_ext_ioctl(i915, &create), -EINVAL);
+ ext.param.ctx_id = -1;
+ igt_assert_eq(create_ext_ioctl(i915, &create), -EINVAL);
+ ext.param.ctx_id = 0;
+ }
+}
+
+static void check_single_timeline(int i915, uint32_t ctx, int num_engines)
+{
+#define RCS_TIMESTAMP (0x2000 + 0x358)
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ const int has_64bit_reloc = gen >= 8;
+ struct drm_i915_gem_exec_object2 results = { .handle = gem_create(i915, 4096) };
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ int timeline = sw_sync_timeline_create();
+ uint32_t last, *map;
+
+ {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&results),
+ .buffer_count = 1,
+ .rsvd1 = ctx,
+ };
+ gem_write(i915, results.handle, 0, &bbe, sizeof(bbe));
+ gem_execbuf(i915, &execbuf);
+ results.flags = EXEC_OBJECT_PINNED;
+ }
+
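+ /*
+ * Queue one timestamp-recording batch per engine slot, each gated by
+ * an in-fence; on a single-timeline context they must still execute
+ * in submission order, which the stored TIMESTAMPs verify below.
+ */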
+ for (int i = 0; i < num_engines; i++) {
+ struct drm_i915_gem_exec_object2 obj[2] = {
+ results, /* write hazard lies! */
+ { .handle = gem_create(i915, 4096) },
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = 2,
+ .rsvd1 = ctx,
+ .rsvd2 = sw_sync_timeline_create_fence(timeline, num_engines - i),
+ .flags = i | I915_EXEC_FENCE_IN,
+ };
+ uint64_t offset = results.offset + 4 * i;
+ uint32_t *cs;
+ int j = 0;
+
+ cs = gem_mmap__cpu(i915, obj[1].handle, 0, 4096, PROT_WRITE);
+
+ cs[j] = 0x24 << 23 | 1; /* SRM */
+ if (has_64bit_reloc)
+ cs[j]++;
+ j++;
+ cs[j++] = RCS_TIMESTAMP;
+ cs[j++] = offset;
+ if (has_64bit_reloc)
+ cs[j++] = offset >> 32;
+ cs[j++] = MI_BATCH_BUFFER_END;
+
+ munmap(cs, 4096);
+
+ gem_execbuf(i915, &execbuf);
+ gem_close(i915, obj[1].handle);
+ close(execbuf.rsvd2);
+ }
+ close(timeline);
+ gem_sync(i915, results.handle);
+
+ map = gem_mmap__cpu(i915, results.handle, 0, 4096, PROT_READ);
+ gem_set_domain(i915, results.handle, I915_GEM_DOMAIN_CPU, 0);
+ gem_close(i915, results.handle);
+
+ last = map[0];
+ for (int i = 1; i < num_engines; i++) {
+ igt_assert_f((map[i] - last) > 0,
+ "Engine instance [%d] executed too early: this:%x, last:%x\n",
+ i, map[i], last);
+ last = map[i];
+ }
+ munmap(map, 4096);
+}
+
+static void iris_pipeline(int i915)
+{
+#ifdef I915_DEFINE_CONTEXT_PARAM_ENGINES
+#define RCS0 {0, 0}
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ .engines = { RCS0, RCS0 }
+ };
+ struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ .base = {
+ .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ .next_extension = 0, /* end of chain */
+ },
+ .param = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ },
+ };
+ struct drm_i915_gem_context_create_ext_setparam p_recover = {
+ .base = {
+ .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ .next_extension = to_user_pointer(&p_engines),
+ },
+ .param = {
+ .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ .value = 0,
+ },
+ };
+ struct drm_i915_gem_context_create_ext_setparam p_prio = {
+ .base = {
+ .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ .next_extension = to_user_pointer(&p_recover),
+ },
+ .param = {
+ .param = I915_CONTEXT_PARAM_PRIORITY,
+ .value = 768,
+ },
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = (I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE |
+ I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS),
+ };
+ struct drm_i915_gem_context_param get;
+
+ igt_require(create_ext_ioctl(i915, &create) == 0);
+
+ create.extensions = to_user_pointer(&p_prio);
+ igt_assert_eq(create_ext_ioctl(i915, &create), 0);
+
+ memset(&get, 0, sizeof(get));
+ get.ctx_id = create.ctx_id;
+ get.param = I915_CONTEXT_PARAM_PRIORITY;
+ gem_context_get_param(i915, &get);
+ igt_assert_eq(get.value, p_prio.param.value);
+
+ memset(&get, 0, sizeof(get));
+ get.ctx_id = create.ctx_id;
+ get.param = I915_CONTEXT_PARAM_RECOVERABLE;
+ gem_context_get_param(i915, &get);
+ igt_assert_eq(get.value, 0);
+
+ check_single_timeline(i915, create.ctx_id, 2);
+
+ gem_context_destroy(i915, create.ctx_id);
+#endif /* I915_DEFINE_CONTEXT_PARAM_ENGINES */
+}
+
igt_main
{
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
@@ -340,17 +552,15 @@ igt_main
memset(&create, 0, sizeof(create));
create.ctx_id = rand();
create.pad = 0;
- igt_assert_eq(__gem_context_create_local(fd, &create), 0);
+ igt_assert_eq(create_ioctl(fd, &create), 0);
igt_assert(create.ctx_id != 0);
gem_context_destroy(fd, create.ctx_id);
}
- igt_subtest("invalid-pad") {
- memset(&create, 0, sizeof(create));
- create.ctx_id = rand();
- create.pad = 1;
- igt_assert_eq(__gem_context_create_local(fd, &create), -EINVAL);
- }
+ igt_subtest("ext-param")
+ basic_ext_param(fd);
+ igt_subtest("iris-pipeline")
+ iris_pipeline(fd);
igt_subtest("maximum-mem")
maximum(fd, ncpus, CHECK_RAM);
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
new file mode 100644
index 00000000..8c66fb26
--- /dev/null
+++ b/tests/i915/gem_ctx_engines.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "igt.h"
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+
+#include <drm.h>
+
+#include "i915/gem_context.h"
+#include "sw_sync.h"
+
+#define engine_class(e, n) ((e)->engines[(n)].engine_class)
+#define engine_instance(e, n) ((e)->engines[(n)].engine_instance)
+
+static bool has_context_engines(int i915)
+{
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = 0,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ };
+ return __gem_context_set_param(i915, &param) == 0;
+}
+
+static void invalid_engines(int i915)
+{
+ struct i915_context_param_engines stack = {}, *engines;
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&stack),
+ };
+ uint32_t handle;
+ void *ptr;
+
+ param.size = 0;
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+
+ param.size = 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+
+ param.size = sizeof(stack) - 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+
+ param.size = sizeof(stack) + 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+
+ param.size = 0;
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+
+ /* Create a single page surrounded by inaccessible nothingness */
+ ptr = mmap(NULL, 3 * 4096, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ igt_assert(ptr != MAP_FAILED);
+
+ munmap(ptr, 4096);
+ engines = ptr + 4096;
+ munmap(ptr + 2 * 4096, 4096);
+
+ param.size = sizeof(*engines) + sizeof(*engines->engines);
+ param.value = to_user_pointer(engines);
+
+ engines->engines[0].engine_class = -1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -ENOENT);
+
+ mprotect(engines, 4096, PROT_READ);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -ENOENT);
+
+ mprotect(engines, 4096, PROT_WRITE);
+ engines->engines[0].engine_class = 0;
+ if (__gem_context_set_param(i915, &param)) /* XXX needs RCS */
+ goto out;
+
+ engines->extensions = to_user_pointer(ptr);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ engines->extensions = 0;
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+
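+ /*
+ * Walk param.value around the page boundaries: maps that stay within
+ * the accessible page succeed, anything reaching into the unmapped
+ * guard pages must be rejected with -EFAULT.
+ */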
+ param.value = to_user_pointer(engines - 1);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines) - 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines) - param.size + 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines) + 4096;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines) - param.size + 4096;
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+
+ param.value = to_user_pointer(engines) - param.size + 4096 + 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines) + 4096;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines) + 4096 - 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines) - 1;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines - 1);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines - 1) + 4096;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(engines - 1) + 4096 - sizeof(*engines->engines) / 2;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ handle = gem_create(i915, 4096 * 3);
+ ptr = gem_mmap__gtt(i915, handle, 4096 * 3, PROT_READ);
+ gem_close(i915, handle);
+
+ munmap(ptr, 4096);
+ munmap(ptr + 8192, 4096);
+
+ param.value = to_user_pointer(ptr + 4096);
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+
+ param.value = to_user_pointer(ptr);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(ptr) + 4095;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(ptr) + 8192;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ param.value = to_user_pointer(ptr) + 12287;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+
+ munmap(ptr + 4096, 4096);
+
+out:
+ munmap(engines, 4096);
+ gem_context_destroy(i915, param.ctx_id);
+}
+
+static void idempotent(int i915)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(expected, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ const size_t base = sizeof(struct i915_context_param_engines);
+ const struct intel_execution_engine2 *e;
+ int idx;
+
+ /* What goes in, must come out. And what comes out, must go in */
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, 0); /* at present the default is the legacy ring mask */
+
+ idx = 0;
+ memset(&engines, 0, sizeof(engines));
+ __for_each_physical_engine(i915, e) {
+ engines.engines[idx].engine_class = e->class;
+ engines.engines[idx].engine_instance = e->instance;
+ idx++;
+ }
+ idx *= sizeof(*engines.engines);
+ p.size = base + idx;
+ gem_context_set_param(i915, &p);
+
+ memcpy(&expected, &engines, sizeof(expected));
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, base + idx);
+ igt_assert(!memcmp(&expected, &engines, idx));
+
+ p.size = base;
+ gem_context_set_param(i915, &p);
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, base);
+
+ /* and it should not have overwritten the previous contents */
+ igt_assert(!memcmp(&expected, &engines, idx));
+
+ memset(&engines, 0, sizeof(engines));
+ engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
+ engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
+ idx = sizeof(*engines.engines);
+ p.size = base + idx;
+ gem_context_set_param(i915, &p);
+
+ memcpy(&expected, &engines, sizeof(expected));
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, base + idx);
+ igt_assert(!memcmp(&expected, &engines, idx));
+
+ memset(&engines, 0, sizeof(engines));
+ p.size = sizeof(engines);
+ gem_context_set_param(i915, &p);
+
+ memcpy(&expected, &engines, sizeof(expected));
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, sizeof(engines));
+ igt_assert(!memcmp(&expected, &engines, idx));
+
+ gem_context_destroy(i915, p.ctx_id);
+}
+
+static void execute_one(int i915)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ /* .size to be filled in later */
+ };
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = gem_create(i915, 4096),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .rsvd1 = param.ctx_id,
+ };
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ const struct intel_execution_engine2 *e;
+
+ gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
+
+ /* Unadulterated I915_EXEC_DEFAULT should work */
+ execbuf.flags = 0;
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, obj.handle);
+
+ __for_each_physical_engine(i915, e) {
+ struct drm_i915_gem_busy busy = { .handle = obj.handle };
+
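+ /*
+ * For each map size i + 1, only slot i holds a valid engine (none
+ * when i == -1); every other execbuf index must report -EINVAL.
+ */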
+ for (int i = -1; i <= I915_EXEC_RING_MASK; i++) {
+ igt_spin_t *spin;
+
+ memset(&engines, 0, sizeof(engines));
+ engine_class(&engines, 0) = e->class;
+ engine_instance(&engines, 0) = e->instance;
+ param.size = offsetof(typeof(engines), engines[1]);
+ gem_context_set_param(i915, &param);
+
+ spin = igt_spin_new(i915,
+ .ctx = param.ctx_id,
+ .engine = 0,
+ .flags = (IGT_SPIN_NO_PREEMPTION |
+ IGT_SPIN_POLL_RUN));
+
+ igt_debug("Testing with map of %d engines\n", i + 1);
+ memset(&engines.engines, -1, sizeof(engines.engines));
+ if (i != -1) {
+ engine_class(&engines, i) = e->class;
+ engine_instance(&engines, i) = e->instance;
+ }
+ param.size = sizeof(uint64_t) + (i + 1) * sizeof(uint32_t);
+ gem_context_set_param(i915, &param);
+
+ igt_spin_busywait_until_started(spin);
+ for (int j = 0; j <= I915_EXEC_RING_MASK; j++) {
+ int expected = j == i ? 0 : -EINVAL;
+
+ execbuf.flags = j;
+ igt_assert_f(__gem_execbuf(i915, &execbuf) == expected,
+ "Failed to report the %s engine for slot %d (valid at %d)\n",
+ j == i ? "valid" : "invalid", j, i);
+ }
+
+ do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ igt_assert_eq(busy.busy, i != -1 ? 1 << (e->class + 16) : 0);
+
+ igt_spin_free(i915, spin);
+
+ gem_sync(i915, obj.handle);
+ do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ igt_assert_eq(busy.busy, 0);
+ }
+ }
+
+ /* Restore the defaults and check I915_EXEC_DEFAULT works again. */
+ param.size = 0;
+ gem_context_set_param(i915, &param);
+ execbuf.flags = 0;
+ gem_execbuf(i915, &execbuf);
+
+ gem_close(i915, obj.handle);
+ gem_context_destroy(i915, param.ctx_id);
+}
+
+static void execute_oneforall(int i915)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ const struct intel_execution_engine2 *e;
+
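+ /*
+ * Point every slot of the map at the same physical engine; whichever
+ * index execbuf uses, the busy ioctl must report that engine's class.
+ */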
+ __for_each_physical_engine(i915, e) {
+ memset(&engines, 0, sizeof(engines));
+ for (int i = 0; i <= I915_EXEC_RING_MASK; i++) {
+ engine_class(&engines, i) = e->class;
+ engine_instance(&engines, i) = e->instance;
+ }
+ gem_context_set_param(i915, &param);
+
+ for (int i = 0; i <= I915_EXEC_RING_MASK; i++) {
+ struct drm_i915_gem_busy busy = {};
+ igt_spin_t *spin;
+
+ spin = __igt_spin_new(i915,
+ .ctx = param.ctx_id,
+ .engine = i);
+
+ busy.handle = spin->handle;
+ do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ igt_assert_eq(busy.busy, 1 << (e->class + 16));
+
+ igt_spin_free(i915, spin);
+ }
+ }
+
+ gem_context_destroy(i915, param.ctx_id);
+}
+
+static void execute_allforone(int i915)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ };
+ const struct intel_execution_engine2 *e;
+ int i;
+
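+ /*
+ * Build a map with one slot per physical engine and check a spinner
+ * on each slot reports busy with the matching engine class.
+ */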
+ i = 0;
+ memset(&engines, 0, sizeof(engines));
+ __for_each_physical_engine(i915, e) {
+ engine_class(&engines, i) = e->class;
+ engine_instance(&engines, i) = e->instance;
+ i++;
+ }
+ param.size = sizeof(uint64_t) + i * sizeof(uint32_t);
+ gem_context_set_param(i915, &param);
+
+ i = 0;
+ __for_each_physical_engine(i915, e) {
+ struct drm_i915_gem_busy busy = {};
+ igt_spin_t *spin;
+
+ spin = __igt_spin_new(i915,
+ .ctx = param.ctx_id,
+ .engine = i++);
+
+ busy.handle = spin->handle;
+ do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ igt_assert_eq(busy.busy, 1 << (e->class + 16));
+
+ igt_spin_free(i915, spin);
+ }
+
+ gem_context_destroy(i915, param.ctx_id);
+}
+
+static void independent(int i915)
+{
+#define RCS_TIMESTAMP (0x2000 + 0x358)
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ const int has_64bit_reloc = gen >= 8;
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ struct drm_i915_gem_exec_object2 results = { .handle = gem_create(i915, 4096) };
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ int timeline = sw_sync_timeline_create();
+ uint32_t last, *map;
+
+ igt_require(gen >= 6); /* No per-engine TIMESTAMP on older gen */
+
+ {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&results),
+ .buffer_count = 1,
+ .rsvd1 = param.ctx_id,
+ };
+ gem_write(i915, results.handle, 0, &bbe, sizeof(bbe));
+ gem_execbuf(i915, &execbuf);
+ results.flags = EXEC_OBJECT_PINNED;
+ }
+
+ memset(&engines, 0, sizeof(engines)); /* All rcs0 */
+ gem_context_set_param(i915, &param);
+
+ for (int i = 0; i < I915_EXEC_RING_MASK + 1; i++) {
+ struct drm_i915_gem_exec_object2 obj[2] = {
+ results, /* write hazard lies! */
+ { .handle = gem_create(i915, 4096) },
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = 2,
+ .rsvd1 = param.ctx_id,
+ .rsvd2 = sw_sync_timeline_create_fence(timeline, i + 1),
+ .flags = (I915_EXEC_RING_MASK - i) | I915_EXEC_FENCE_IN,
+ };
+ uint64_t offset = results.offset + 4 * i;
+ uint32_t *cs;
+ int j = 0;
+
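+ /* SRM the ring TIMESTAMP into slot i of the results buffer */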
+ cs = gem_mmap__cpu(i915, obj[1].handle, 0, 4096, PROT_WRITE);
+
+ cs[j] = 0x24 << 23 | 1; /* SRM */
+ if (has_64bit_reloc)
+ cs[j]++;
+ j++;
+ cs[j++] = RCS_TIMESTAMP;
+ cs[j++] = offset;
+ if (has_64bit_reloc)
+ cs[j++] = offset >> 32;
+ cs[j++] = MI_BATCH_BUFFER_END;
+
+ munmap(cs, 4096);
+
+ gem_execbuf(i915, &execbuf);
+ gem_close(i915, obj[1].handle);
+ close(execbuf.rsvd2);
+ }
+ close(timeline);
+ gem_sync(i915, results.handle);
+
+ map = gem_mmap__cpu(i915, results.handle, 0, 4096, PROT_READ);
+ gem_set_domain(i915, results.handle, I915_GEM_DOMAIN_CPU, 0);
+ gem_close(i915, results.handle);
+
+ last = map[0];
+ for (int i = 1; i < I915_EXEC_RING_MASK + 1; i++) {
+ igt_assert_f((map[i] - last) > 0,
+ "Engine instance [%d] executed too late\n", i);
+ last = map[i];
+ }
+ munmap(map, 4096);
+
+ gem_context_destroy(i915, param.ctx_id);
+}
+
+igt_main
+{
+ int i915 = -1;
+
+ igt_fixture {
+ i915 = drm_open_driver_render(DRIVER_INTEL);
+ igt_require_gem(i915);
+
+ gem_require_contexts(i915);
+ igt_require(has_context_engines(i915));
+ }
+
+ igt_subtest("invalid-engines")
+ invalid_engines(i915);
+
+ igt_subtest("idempotent")
+ idempotent(i915);
+
+ igt_subtest("execute-one")
+ execute_one(i915);
+
+ igt_subtest("execute-oneforall")
+ execute_oneforall(i915);
+
+ igt_subtest("execute-allforone")
+ execute_allforone(i915);
+
+ igt_subtest("independent")
+ independent(i915);
+}
diff --git a/tests/i915/gem_ctx_exec.c b/tests/i915/gem_ctx_exec.c
index b8e0e074..614a9f40 100644
--- a/tests/i915/gem_ctx_exec.c
+++ b/tests/i915/gem_ctx_exec.c
@@ -111,7 +111,8 @@ static void big_exec(int fd, uint32_t handle, int ring)
gem_sync(fd, handle);
}
-static void invalid_context(int fd, unsigned ring, uint32_t handle)
+static void invalid_context(int fd, const struct intel_execution_engine2 *e,
+ uint32_t handle)
{
struct drm_i915_gem_exec_object2 obj = {
.handle = handle,
@@ -119,7 +120,7 @@ static void invalid_context(int fd, unsigned ring, uint32_t handle)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .flags = ring,
+ .flags = e->flags,
};
unsigned int i;
uint32_t ctx;
@@ -198,7 +199,7 @@ static void norecovery(int i915)
igt_main
{
const uint32_t batch[2] = { 0, MI_BATCH_BUFFER_END };
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
uint32_t handle;
uint32_t ctx_id;
int fd;
@@ -228,12 +229,9 @@ igt_main
gem_sync(fd, handle);
}
- for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("basic-invalid-context-%s", e->name) {
- gem_require_ring(fd, e->exec_id | e->flags);
- invalid_context(fd, e->exec_id | e->flags, handle);
- }
- }
+ __for_each_physical_engine(fd, e)
+ igt_subtest_f("basic-invalid-context-%s", e->name)
+ invalid_context(fd, e, handle);
igt_subtest("eviction")
big_exec(fd, handle, 0);
diff --git a/tests/i915/gem_ctx_isolation.c b/tests/i915/gem_ctx_isolation.c
index bcd0f481..5b054c81 100644
--- a/tests/i915/gem_ctx_isolation.c
+++ b/tests/i915/gem_ctx_isolation.c
@@ -321,8 +321,7 @@ static uint32_t read_regs(int fd,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.flags =
- gem_class_instance_to_eb_flags(fd, e->class, e->instance);
+ execbuf.flags = e->flags;
execbuf.rsvd1 = ctx;
gem_execbuf(fd, &execbuf);
gem_close(fd, obj[1].handle);
@@ -377,8 +376,7 @@ static void write_regs(int fd,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags =
- gem_class_instance_to_eb_flags(fd, e->class, e->instance);
+ execbuf.flags = e->flags;
execbuf.rsvd1 = ctx;
gem_execbuf(fd, &execbuf);
gem_close(fd, obj.handle);
@@ -448,8 +446,7 @@ static void restore_regs(int fd,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.flags =
- gem_class_instance_to_eb_flags(fd, e->class, e->instance);
+ execbuf.flags = e->flags;
execbuf.rsvd1 = ctx;
gem_execbuf(fd, &execbuf);
gem_close(fd, obj[1].handle);
@@ -559,8 +556,7 @@ static void nonpriv(int fd,
0x0505c0c0,
0xdeadbeef
};
- unsigned int engine =
- gem_class_instance_to_eb_flags(fd, e->class, e->instance);
+ unsigned int engine = e->flags;
unsigned int num_values = ARRAY_SIZE(values);
/* Sigh -- hsw: we need cmdparser access to our own registers! */
@@ -616,9 +612,7 @@ static void isolation(int fd,
0xaaaaaaaa,
0xdeadbeef
};
- unsigned int engine = gem_class_instance_to_eb_flags(fd,
- e->class,
- e->instance);
+ unsigned int engine = e->flags;
unsigned int num_values =
flags & (DIRTY1 | DIRTY2) ? ARRAY_SIZE(values) : 1;
@@ -729,8 +723,7 @@ static void preservation(int fd,
0xdeadbeef
};
const unsigned int num_values = ARRAY_SIZE(values);
- unsigned int engine =
- gem_class_instance_to_eb_flags(fd, e->class, e->instance);
+ unsigned int engine = e->flags;
uint32_t ctx[num_values +1 ];
uint32_t regs[num_values + 1][2];
igt_spin_t *spin;
@@ -840,7 +833,7 @@ igt_main
igt_subtest_group {
igt_fixture {
igt_require(has_context_isolation & (1 << e->class));
- gem_require_engine(fd, e->class, e->instance);
+ gem_require_ring(fd, e->flags);
igt_fork_hang_detector(fd);
}
diff --git a/tests/i915/gem_ctx_param.c b/tests/i915/gem_ctx_param.c
index b6f57236..fa0ab520 100644
--- a/tests/i915/gem_ctx_param.c
+++ b/tests/i915/gem_ctx_param.c
@@ -28,6 +28,7 @@
#include <limits.h>
#include "igt.h"
+#include "i915/gem_vm.h"
IGT_TEST_DESCRIPTION("Basic test for context set/get param input validation.");
@@ -36,17 +37,6 @@ IGT_TEST_DESCRIPTION("Basic test for context set/get param input validation.");
#define NEW_CTX BIT(0)
#define USER BIT(1)
-static int reopen_driver(int fd)
-{
- char path[256];
-
- snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
- fd = open(path, O_RDWR);
- igt_assert_lte(0, fd);
-
- return fd;
-}
-
static void set_priority(int i915)
{
static const int64_t test_values[] = {
@@ -91,7 +81,7 @@ static void set_priority(int i915)
igt_permute_array(values, size, igt_exchange_int64);
igt_fork(flags, NEW_CTX | USER) {
- int fd = reopen_driver(i915);
+ int fd = gem_reopen_driver(i915);
struct drm_i915_gem_context_param arg = {
.param = I915_CONTEXT_PARAM_PRIORITY,
.ctx_id = flags & NEW_CTX ? gem_context_create(fd) : 0,
@@ -143,6 +133,105 @@ static void set_priority(int i915)
free(values);
}
+static uint32_t __batch_create(int i915, uint32_t offset)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ uint32_t handle;
+
+ handle = gem_create(i915, ALIGN(offset + 4, 4096));
+ gem_write(i915, handle, offset, &bbe, sizeof(bbe));
+
+ return handle;
+}
+
+static uint32_t batch_create(int i915)
+{
+ return __batch_create(i915, 0);
+}
+
+static void test_vm(int i915)
+{
+ const uint64_t nonzero_offset = 48 << 20;
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ };
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ uint32_t parent, child;
+
+ /*
+ * Proving 2 contexts share the same GTT is quite tricky as we have no
+ * means of directly comparing them (each handle returned to userspace
+ * is unique). What we do instead is rely on a quirk of execbuf that
+ * it does not try to move a VMA without good reason, and so, having
+ * used an object in one context, it will keep the same address in
+ * the next context that shares the VM.
+ */
+
+ arg.value = -1ull;
+ igt_require(__gem_context_set_param(i915, &arg) == -ENOENT);
+
+ parent = gem_context_create(i915);
+ child = gem_context_create(i915);
+
+ /* Using implicit soft-pinning */
+ eb.rsvd1 = parent;
+ batch.offset = nonzero_offset;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, nonzero_offset);
+
+ eb.rsvd1 = child;
+ batch.offset = 0;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 0);
+
+ eb.rsvd1 = parent;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, nonzero_offset);
+
+ arg.ctx_id = parent;
+ gem_context_get_param(i915, &arg);
+ gem_context_set_param(i915, &arg);
+
+ /* Still the same VM, so expect the old VMA again */
+ batch.offset = 0;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, nonzero_offset);
+
+ arg.ctx_id = child;
+ gem_context_set_param(i915, &arg);
+
+ eb.rsvd1 = child;
+ batch.offset = 0;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, nonzero_offset);
+
+ gem_context_destroy(i915, child);
+ gem_context_destroy(i915, parent);
+
+ /* both contexts destroyed, but we still keep hold of the vm */
+ child = gem_context_create(i915);
+
+ arg.ctx_id = child;
+ gem_context_set_param(i915, &arg);
+
+ eb.rsvd1 = child;
+ batch.offset = 0;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, nonzero_offset);
+
+ gem_context_destroy(i915, child);
+ gem_vm_destroy(i915, arg.value);
+
+ gem_sync(i915, batch.handle);
+ gem_close(i915, batch.handle);
+}
+
igt_main
{
struct drm_i915_gem_context_param arg;
@@ -253,6 +342,9 @@ igt_main
gem_context_set_param(fd, &arg);
}
+ igt_subtest("vm")
+ test_vm(fd);
+
arg.param = I915_CONTEXT_PARAM_PRIORITY;
igt_subtest("set-priority-not-supported") {
diff --git a/tests/i915/gem_ctx_shared.c b/tests/i915/gem_ctx_shared.c
new file mode 100644
index 00000000..4b1020b9
--- /dev/null
+++ b/tests/i915/gem_ctx_shared.c
@@ -0,0 +1,862 @@
+/*
+ * Copyright © 2017-2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "igt.h"
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+
+#include <drm.h>
+
+#include "igt_rand.h"
+#include "igt_vgem.h"
+#include "sync_file.h"
+
+#define LO 0
+#define HI 1
+#define NOISE 2
+
+#define MAX_PRIO LOCAL_I915_CONTEXT_MAX_USER_PRIORITY
+#define MIN_PRIO LOCAL_I915_CONTEXT_MIN_USER_PRIORITY
+
+static int priorities[] = {
+ [LO] = MIN_PRIO / 2,
+ [HI] = MAX_PRIO / 2,
+};
+
+#define MAX_ELSP_QLEN 16
+
+IGT_TEST_DESCRIPTION("Test shared contexts.");
+
+static void create_shared_gtt(int i915, unsigned int flags)
+#define DETACHED 0x1
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = gem_create(i915, 4096),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ };
+ uint32_t parent, child;
+
+ gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, obj.handle);
+
+ child = flags & DETACHED ? gem_context_create(i915) : 0;
+ igt_until_timeout(2) {
+ parent = flags & DETACHED ? child : 0;
+ child = gem_context_clone(i915,
+ parent, I915_CONTEXT_CLONE_VM,
+ 0);
+ execbuf.rsvd1 = child;
+ gem_execbuf(i915, &execbuf);
+
+ if (flags & DETACHED) {
+ gem_context_destroy(i915, parent);
+ gem_execbuf(i915, &execbuf);
+ } else {
+ parent = child;
+ gem_context_destroy(i915, parent);
+ }
+
+ execbuf.rsvd1 = parent;
+ igt_assert_eq(__gem_execbuf(i915, &execbuf), -ENOENT);
+ igt_assert_eq(__gem_context_clone(i915,
+ parent, I915_CONTEXT_CLONE_VM,
+ 0, &parent), -ENOENT);
+ }
+ if (flags & DETACHED)
+ gem_context_destroy(i915, child);
+
+ gem_sync(i915, obj.handle);
+ gem_close(i915, obj.handle);
+}
+
+static void disjoint_timelines(int i915)
+{
+ IGT_CORK_HANDLE(cork);
+ igt_spin_t *spin[2];
+ uint32_t plug, child;
+
+ igt_require(gem_has_execlists(i915));
+
+ /*
+ * Each context, although they share a vm, are expected to be
+ * distinct timelines. A request queued to one context should be
+ * independent of any shared contexts.
+ */
+ child = gem_context_clone(i915, 0, I915_CONTEXT_CLONE_VM, 0);
+ plug = igt_cork_plug(&cork, i915);
+
+ spin[0] = __igt_spin_new(i915, .ctx = 0, .dependency = plug);
+ spin[1] = __igt_spin_new(i915, .ctx = child);
+
+ /* Wait for the second spinner, will hang if stuck behind the first */
+ igt_spin_end(spin[1]);
+ gem_sync(i915, spin[1]->handle);
+
+ igt_cork_unplug(&cork);
+
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
+}
+
+static void exhaust_shared_gtt(int i915, unsigned int flags)
+#define EXHAUST_LRC 0x1
+{
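+ /* Work from a fresh fd inside a child so the flood of contexts is torn down on exit */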
+ i915 = gem_reopen_driver(i915);
+
+ igt_fork(pid, 1) {
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = gem_create(i915, 4096)
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ };
+ uint32_t parent, child;
+ unsigned long count = 0;
+ int err;
+
+ gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
+
+ child = 0;
+ for (;;) {
+ parent = child;
+ err = __gem_context_clone(i915,
+ parent, I915_CONTEXT_CLONE_VM,
+ 0, &child);
+ if (err)
+ break;
+
+ if (flags & EXHAUST_LRC) {
+ execbuf.rsvd1 = child;
+ err = __gem_execbuf(i915, &execbuf);
+ if (err)
+ break;
+ }
+
+ count++;
+ }
+ gem_sync(i915, obj.handle);
+
+ igt_info("Created %lu shared contexts, before %d (%s)\n",
+ count, err, strerror(-err));
+ }
+ close(i915);
+ igt_waitchildren();
+}
+
+static void exec_shared_gtt(int i915, unsigned int ring)
+{
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj = {};
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .flags = ring,
+ };
+ uint32_t scratch, *s;
+ uint32_t batch[16];
+ int i;
+
+ gem_require_ring(i915, ring);
+ igt_require(gem_can_store_dword(i915, ring));
+
+ /* Find a hole big enough for both objects later */
+ scratch = gem_create(i915, 16384);
+ gem_write(i915, scratch, 0, &bbe, sizeof(bbe));
+ obj.handle = scratch;
+ gem_execbuf(i915, &execbuf);
+ gem_close(i915, scratch);
+ obj.flags |= EXEC_OBJECT_PINNED; /* reuse this address */
+
+ scratch = gem_create(i915, 4096);
+ s = gem_mmap__cpu(i915, scratch, 0, 4096, PROT_WRITE);
+
+ gem_set_domain(i915, scratch, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ *s = bbe;
+
+ /* Load object into place in the GTT */
+ obj.handle = scratch;
+ gem_execbuf(i915, &execbuf);
+
+ /* Presume nothing causes an eviction in the meantime! */
+
+ obj.handle = gem_create(i915, 4096);
+
+ i = 0;
+ batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ batch[++i] = obj.offset;
+ batch[++i] = 0;
+ } else if (gen >= 4) {
+ batch[++i] = 0;
+ batch[++i] = obj.offset;
+ } else {
+ batch[i]--;
+ batch[++i] = obj.offset;
+ }
+ batch[++i] = 0xc0ffee;
+ batch[++i] = bbe;
+ gem_write(i915, obj.handle, 0, batch, sizeof(batch));
+
+ obj.offset += 8192; /* make sure we don't cause an eviction! */
+ execbuf.rsvd1 = gem_context_clone(i915, 0, I915_CONTEXT_CLONE_VM, 0);
+ if (gen > 3 && gen < 6)
+ execbuf.flags |= I915_EXEC_SECURE;
+
+ gem_execbuf(i915, &execbuf);
+ gem_context_destroy(i915, execbuf.rsvd1);
+ gem_sync(i915, obj.handle); /* write hazard lies */
+ gem_close(i915, obj.handle);
+
+ /*
+ * If we created the new context with the old GTT, the write
+ * into the stale location of scratch will have landed in the right
+ * object. Otherwise, it should read the previous value of
+ * MI_BATCH_BUFFER_END.
+ *
+ * Setting .write = CPU to paper over our write hazard lies above.
+ */
+ gem_set_domain(i915, scratch, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ igt_assert_eq_u32(*s, 0xc0ffee);
+
+ munmap(s, 4096);
+ gem_close(i915, scratch);
+}
+
+static int nop_sync(int i915, uint32_t ctx, unsigned int ring, int64_t timeout)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = gem_create(i915, 4096),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .flags = ring,
+ .rsvd1 = ctx,
+ };
+ int err;
+
+ gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
+ gem_execbuf(i915, &execbuf);
+ err = gem_wait(i915, obj.handle, &timeout);
+ gem_close(i915, obj.handle);
+
+ return err;
+}
+
+static bool has_single_timeline(int i915)
+{
+ uint32_t ctx;
+
+ __gem_context_clone(i915, 0, 0,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
+ &ctx);
+ if (ctx)
+ gem_context_destroy(i915, ctx);
+
+ return ctx != 0;
+}
+
+static void single_timeline(int i915)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = gem_create(i915, 4096),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ };
+ struct sync_fence_info rings[16];
+ struct sync_file_info sync_file_info = {
+ .num_fences = 1,
+ };
+ unsigned int engine;
+ int n;
+
+ igt_require(has_single_timeline(i915));
+
+ gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, obj.handle);
+
+ /*
+ * For a "single timeline" context, each ring is on the common
+ * timeline, unlike a normal context where each ring has an
+	 * independent timeline. That is, no matter which engine we submit
+ * to, it reports the same timeline name and fence context. However,
+ * the fence context is not reported through the sync_fence_info.
+ */
+ execbuf.rsvd1 =
+ gem_context_clone(i915, 0, 0,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+ execbuf.flags = I915_EXEC_FENCE_OUT;
+ n = 0;
+ for_each_engine(i915, engine) {
+ gem_execbuf_wr(i915, &execbuf);
+ sync_file_info.sync_fence_info = to_user_pointer(&rings[n]);
+ do_ioctl(execbuf.rsvd2 >> 32, SYNC_IOC_FILE_INFO, &sync_file_info);
+ close(execbuf.rsvd2 >> 32);
+
+ igt_info("ring[%d] fence: %s %s\n",
+ n, rings[n].driver_name, rings[n].obj_name);
+ n++;
+ }
+ gem_sync(i915, obj.handle);
+ gem_close(i915, obj.handle);
+
+ for (int i = 1; i < n; i++) {
+ igt_assert(!strcmp(rings[0].driver_name, rings[i].driver_name));
+ igt_assert(!strcmp(rings[0].obj_name, rings[i].obj_name));
+ }
+}
+
+static void exec_single_timeline(int i915, unsigned int engine)
+{
+ unsigned int other;
+ igt_spin_t *spin;
+ uint32_t ctx;
+
+ igt_require(gem_ring_has_physical_engine(i915, engine));
+ igt_require(has_single_timeline(i915));
+
+ /*
+ * On an ordinary context, a blockage on one engine doesn't prevent
+	 * execution on another.
+ */
+ ctx = 0;
+ spin = NULL;
+ for_each_physical_engine(i915, other) {
+ if (other == engine)
+ continue;
+
+ if (spin == NULL) {
+ spin = __igt_spin_new(i915, .ctx = ctx, .engine = other);
+ } else {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = spin->execbuf.buffers_ptr,
+ .buffer_count = spin->execbuf.buffer_count,
+ .flags = other,
+ .rsvd1 = ctx,
+ };
+ gem_execbuf(i915, &execbuf);
+ }
+ }
+ igt_require(spin);
+ igt_assert_eq(nop_sync(i915, ctx, engine, NSEC_PER_SEC), 0);
+ igt_spin_free(i915, spin);
+
+ /*
+ * But if we create a context with just a single shared timeline,
+ * then it will block waiting for the earlier requests on the
+ * other engines.
+ */
+ ctx = gem_context_clone(i915, 0, 0,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+ spin = NULL;
+ for_each_physical_engine(i915, other) {
+ if (other == engine)
+ continue;
+
+ if (spin == NULL) {
+ spin = __igt_spin_new(i915, .ctx = ctx, .engine = other);
+ } else {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = spin->execbuf.buffers_ptr,
+ .buffer_count = spin->execbuf.buffer_count,
+ .flags = other,
+ .rsvd1 = ctx,
+ };
+ gem_execbuf(i915, &execbuf);
+ }
+ }
+ igt_assert(spin);
+ igt_assert_eq(nop_sync(i915, ctx, engine, NSEC_PER_SEC), -ETIME);
+ igt_spin_free(i915, spin);
+}
+
+static void store_dword(int i915, uint32_t ctx, unsigned ring,
+ uint32_t target, uint32_t offset, uint32_t value,
+ uint32_t cork, unsigned write_domain)
+{
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ struct drm_i915_gem_exec_object2 obj[3];
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ uint32_t batch[16];
+ int i;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj + !cork);
+ execbuf.buffer_count = 2 + !!cork;
+ execbuf.flags = ring;
+ if (gen < 6)
+ execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx;
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = cork;
+ obj[1].handle = target;
+ obj[2].handle = gem_create(i915, 4096);
+
+ memset(&reloc, 0, sizeof(reloc));
+ reloc.target_handle = obj[1].handle;
+ reloc.presumed_offset = 0;
+ reloc.offset = sizeof(uint32_t);
+ reloc.delta = offset;
+ reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ reloc.write_domain = write_domain;
+ obj[2].relocs_ptr = to_user_pointer(&reloc);
+ obj[2].relocation_count = 1;
+
+ i = 0;
+ batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ batch[++i] = offset;
+ batch[++i] = 0;
+ } else if (gen >= 4) {
+ batch[++i] = 0;
+ batch[++i] = offset;
+ reloc.offset += sizeof(uint32_t);
+ } else {
+ batch[i]--;
+ batch[++i] = offset;
+ }
+ batch[++i] = value;
+ batch[++i] = MI_BATCH_BUFFER_END;
+ gem_write(i915, obj[2].handle, 0, batch, sizeof(batch));
+ gem_execbuf(i915, &execbuf);
+ gem_close(i915, obj[2].handle);
+}
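+
+/*
+ * A quick sketch of how store_dword() is used throughout: the optional
+ * cork object serialises the batch behind a plug, and the relocation makes
+ * MI_STORE_DWORD_IMM land 'value' at 'offset' inside 'target', so reading
+ * the target back afterwards reveals which context's write arrived last.
+ */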
+
+static uint32_t create_highest_priority(int i915)
+{
+ uint32_t ctx = gem_context_create(i915);
+
+ /*
+ * If there is no priority support, all contexts will have equal
+ * priority (and therefore the max user priority), so no context
+	 * can overtake us, and we can effectively form a plug.
+ */
+ __gem_context_set_priority(i915, ctx, MAX_PRIO);
+
+ return ctx;
+}
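+
+/*
+ * A rough sketch of how this is used by unplug_show_queue() below: the
+ * assumption is that a max-priority spinner keeps the engine occupied, so
+ * once the cork is unplugged the lower-priority corked batches become
+ * runnable but stay queued behind the spinners, letting i915_engine_info
+ * show the pending queue.
+ */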
+
+static void unplug_show_queue(int i915, struct igt_cork *c, unsigned int engine)
+{
+ igt_spin_t *spin[MAX_ELSP_QLEN];
+
+ for (int n = 0; n < ARRAY_SIZE(spin); n++) {
+ const struct igt_spin_factory opts = {
+ .ctx = create_highest_priority(i915),
+ .engine = engine,
+ };
+ spin[n] = __igt_spin_factory(i915, &opts);
+ gem_context_destroy(i915, opts.ctx);
+ }
+
+ igt_cork_unplug(c); /* batches will now be queued on the engine */
+ igt_debugfs_dump(i915, "i915_engine_info");
+
+ for (int n = 0; n < ARRAY_SIZE(spin); n++)
+ igt_spin_free(i915, spin[n]);
+}
+
+static uint32_t store_timestamp(int i915,
+ uint32_t ctx, unsigned ring,
+ unsigned mmio_base,
+ int offset)
+{
+ const bool r64b = intel_gen(intel_get_drm_devid(i915)) >= 8;
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = gem_create(i915, 4096),
+ .relocation_count = 1,
+ };
+ struct drm_i915_gem_relocation_entry reloc = {
+ .target_handle = obj.handle,
+ .offset = 2 * sizeof(uint32_t),
+ .delta = offset * sizeof(uint32_t),
+ .read_domains = I915_GEM_DOMAIN_INSTRUCTION,
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .flags = ring,
+ .rsvd1 = ctx,
+ };
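+	/*
+	 * The batch below is, in rough terms, an MI_STORE_REGISTER_MEM
+	 * (0x24 << 23, with an extra address dword on gen8+): it copies the
+	 * ring timestamp register at mmio_base + 0x358 into this object at
+	 * dword 'offset', so the captured timestamps can be compared later.
+	 */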
+ uint32_t batch[] = {
+ 0x24 << 23 | (1 + r64b), /* SRM */
+ mmio_base + 0x358,
+ offset * sizeof(uint32_t),
+ 0,
+ MI_BATCH_BUFFER_END
+ };
+
+ igt_require(intel_gen(intel_get_drm_devid(i915)) >= 7);
+
+ gem_write(i915, obj.handle, 0, batch, sizeof(batch));
+ obj.relocs_ptr = to_user_pointer(&reloc);
+
+ gem_execbuf(i915, &execbuf);
+
+ return obj.handle;
+}
+
+static void independent(int i915, unsigned ring, unsigned flags)
+{
+ const int TIMESTAMP = 1023;
+ uint32_t handle[ARRAY_SIZE(priorities)];
+ igt_spin_t *spin[MAX_ELSP_QLEN];
+ unsigned int mmio_base;
+
+ /* XXX i915_query()! */
+ switch (ring) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ mmio_base = 0x2000;
+ break;
+#if 0
+ case I915_EXEC_BSD:
+ mmio_base = 0x12000;
+ break;
+#endif
+ case I915_EXEC_BLT:
+ mmio_base = 0x22000;
+ break;
+
+#define GEN11_VECS0_BASE 0x1c8000
+#define GEN11_VECS1_BASE 0x1d8000
+ case I915_EXEC_VEBOX:
+ if (intel_gen(intel_get_drm_devid(i915)) >= 11)
+ mmio_base = GEN11_VECS0_BASE;
+ else
+ mmio_base = 0x1a000;
+ break;
+
+ default:
+ igt_skip("mmio base not known\n");
+ }
+
+ for (int n = 0; n < ARRAY_SIZE(spin); n++) {
+ const struct igt_spin_factory opts = {
+ .ctx = create_highest_priority(i915),
+ .engine = ring,
+ };
+ spin[n] = __igt_spin_factory(i915, &opts);
+ gem_context_destroy(i915, opts.ctx);
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
+ uint32_t ctx = gem_queue_create(i915);
+ gem_context_set_priority(i915, ctx, priorities[i]);
+ handle[i] = store_timestamp(i915, ctx, ring, mmio_base, TIMESTAMP);
+ gem_context_destroy(i915, ctx);
+ }
+
+ for (int n = 0; n < ARRAY_SIZE(spin); n++)
+ igt_spin_free(i915, spin[n]);
+
+ for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
+ uint32_t *ptr;
+
+ ptr = gem_mmap__gtt(i915, handle[i], 4096, PROT_READ);
+ gem_set_domain(i915, handle[i], /* no write hazard lies! */
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ gem_close(i915, handle[i]);
+
+ handle[i] = ptr[TIMESTAMP];
+ munmap(ptr, 4096);
+
+ igt_debug("ctx[%d] .prio=%d, timestamp=%u\n",
+ i, priorities[i], handle[i]);
+ }
+
+ igt_assert((int32_t)(handle[HI] - handle[LO]) < 0);
+}
+
+static void reorder(int i915, unsigned ring, unsigned flags)
+#define EQUAL 1
+{
+ IGT_CORK_HANDLE(cork);
+ uint32_t scratch;
+ uint32_t *ptr;
+ uint32_t ctx[2];
+ uint32_t plug;
+
+ ctx[LO] = gem_queue_create(i915);
+ gem_context_set_priority(i915, ctx[LO], MIN_PRIO);
+
+ ctx[HI] = gem_queue_create(i915);
+ gem_context_set_priority(i915, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);
+
+ scratch = gem_create(i915, 4096);
+ plug = igt_cork_plug(&cork, i915);
+
+	/* We expect the high priority context to be executed first, and
+	 * so the final result will be the value written by the low priority
+	 * context: whichever batch runs last leaves its id in scratch.
+	 */
+ store_dword(i915, ctx[LO], ring, scratch, 0, ctx[LO], plug, 0);
+ store_dword(i915, ctx[HI], ring, scratch, 0, ctx[HI], plug, 0);
+
+ unplug_show_queue(i915, &cork, ring);
+ gem_close(i915, plug);
+
+ gem_context_destroy(i915, ctx[LO]);
+ gem_context_destroy(i915, ctx[HI]);
+
+ ptr = gem_mmap__gtt(i915, scratch, 4096, PROT_READ);
+ gem_set_domain(i915, scratch, /* no write hazard lies! */
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ gem_close(i915, scratch);
+
+ if (flags & EQUAL) /* equal priority, result will be fifo */
+ igt_assert_eq_u32(ptr[0], ctx[HI]);
+ else
+ igt_assert_eq_u32(ptr[0], ctx[LO]);
+ munmap(ptr, 4096);
+}
+
+static void promotion(int i915, unsigned ring)
+{
+ IGT_CORK_HANDLE(cork);
+ uint32_t result, dep;
+ uint32_t *ptr;
+ uint32_t ctx[3];
+ uint32_t plug;
+
+ ctx[LO] = gem_queue_create(i915);
+ gem_context_set_priority(i915, ctx[LO], MIN_PRIO);
+
+ ctx[HI] = gem_queue_create(i915);
+ gem_context_set_priority(i915, ctx[HI], 0);
+
+ ctx[NOISE] = gem_queue_create(i915);
+ gem_context_set_priority(i915, ctx[NOISE], MIN_PRIO/2);
+
+ result = gem_create(i915, 4096);
+ dep = gem_create(i915, 4096);
+
+ plug = igt_cork_plug(&cork, i915);
+
+ /* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
+ *
+ * fifo would be NOISE, LO, HI.
+ * strict priority would be HI, NOISE, LO
+ */
+ store_dword(i915, ctx[NOISE], ring, result, 0, ctx[NOISE], plug, 0);
+ store_dword(i915, ctx[LO], ring, result, 0, ctx[LO], plug, 0);
+
+ /* link LO <-> HI via a dependency on another buffer */
+ store_dword(i915, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(i915, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);
+
+ store_dword(i915, ctx[HI], ring, result, 0, ctx[HI], 0, 0);
+
+ unplug_show_queue(i915, &cork, ring);
+ gem_close(i915, plug);
+
+ gem_context_destroy(i915, ctx[NOISE]);
+ gem_context_destroy(i915, ctx[LO]);
+ gem_context_destroy(i915, ctx[HI]);
+
+ ptr = gem_mmap__gtt(i915, dep, 4096, PROT_READ);
+ gem_set_domain(i915, dep, /* no write hazard lies! */
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ gem_close(i915, dep);
+
+ igt_assert_eq_u32(ptr[0], ctx[HI]);
+ munmap(ptr, 4096);
+
+ ptr = gem_mmap__gtt(i915, result, 4096, PROT_READ);
+ gem_set_domain(i915, result, /* no write hazard lies! */
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ gem_close(i915, result);
+
+ igt_assert_eq_u32(ptr[0], ctx[NOISE]);
+ munmap(ptr, 4096);
+}
+
+static void smoketest(int i915, unsigned ring, unsigned timeout)
+{
+ const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ unsigned engines[16];
+ unsigned nengine;
+ unsigned engine;
+ uint32_t scratch;
+ uint32_t *ptr;
+
+ nengine = 0;
+ for_each_physical_engine(i915, engine)
+ engines[nengine++] = engine;
+ igt_require(nengine);
+
+ scratch = gem_create(i915, 4096);
+ igt_fork(child, ncpus) {
+ unsigned long count = 0;
+ uint32_t ctx;
+
+ hars_petruska_f54_1_random_perturb(child);
+
+ ctx = gem_queue_create(i915);
+ igt_until_timeout(timeout) {
+ int prio;
+
+ prio = hars_petruska_f54_1_random_unsafe_max(MAX_PRIO - MIN_PRIO) + MIN_PRIO;
+ gem_context_set_priority(i915, ctx, prio);
+
+ engine = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
+ store_dword(i915, ctx, engine, scratch,
+ 8*child + 0, ~child,
+ 0, 0);
+ for (unsigned int step = 0; step < 8; step++)
+ store_dword(i915, ctx, engine, scratch,
+ 8*child + 4, count++,
+ 0, 0);
+ }
+ gem_context_destroy(i915, ctx);
+ }
+ igt_waitchildren();
+
+ ptr = gem_mmap__gtt(i915, scratch, 4096, PROT_READ);
+ gem_set_domain(i915, scratch, /* no write hazard lies! */
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ gem_close(i915, scratch);
+
+ for (unsigned n = 0; n < ncpus; n++) {
+ igt_assert_eq_u32(ptr[2*n], ~n);
+ /*
+ * Note this count is approximate due to unconstrained
+ * ordering of the dword writes between engines.
+ *
+ * Take the result with a pinch of salt.
+ */
+ igt_info("Child[%d] completed %u cycles\n", n, ptr[2*n+1]);
+ }
+ munmap(ptr, 4096);
+}
+
+igt_main
+{
+ const struct intel_execution_engine *e;
+ int i915 = -1;
+
+ igt_fixture {
+ i915 = drm_open_driver(DRIVER_INTEL);
+ igt_require_gem(i915);
+ }
+
+ igt_subtest_group {
+ igt_fixture {
+ igt_require(gem_contexts_has_shared_gtt(i915));
+ igt_fork_hang_detector(i915);
+ }
+
+ igt_subtest("create-shared-gtt")
+ create_shared_gtt(i915, 0);
+
+ igt_subtest("detached-shared-gtt")
+ create_shared_gtt(i915, DETACHED);
+
+ igt_subtest("disjoint-timelines")
+ disjoint_timelines(i915);
+
+ igt_subtest("single-timeline")
+ single_timeline(i915);
+
+ igt_subtest("exhaust-shared-gtt")
+ exhaust_shared_gtt(i915, 0);
+
+ igt_subtest("exhaust-shared-gtt-lrc")
+ exhaust_shared_gtt(i915, EXHAUST_LRC);
+
+ for (e = intel_execution_engines; e->name; e++) {
+ igt_subtest_f("exec-shared-gtt-%s", e->name)
+ exec_shared_gtt(i915, e->exec_id | e->flags);
+
+ igt_subtest_f("exec-single-timeline-%s", e->name)
+ exec_single_timeline(i915,
+ e->exec_id | e->flags);
+
+ /*
+ * Check that the shared contexts operate independently,
+			 * that is, requests on one ("queue") can be scheduled
+ * around another queue. We only check the basics here,
+ * enough to reduce the queue into just another context,
+ * and so rely on gem_exec_schedule to prove the rest.
+ */
+ igt_subtest_group {
+ igt_fixture {
+ gem_require_ring(i915, e->exec_id | e->flags);
+ igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));
+ igt_require(gem_scheduler_enabled(i915));
+ igt_require(gem_scheduler_has_ctx_priority(i915));
+ }
+
+ igt_subtest_f("Q-independent-%s", e->name)
+ independent(i915, e->exec_id | e->flags, 0);
+
+ igt_subtest_f("Q-in-order-%s", e->name)
+ reorder(i915, e->exec_id | e->flags, EQUAL);
+
+ igt_subtest_f("Q-out-order-%s", e->name)
+ reorder(i915, e->exec_id | e->flags, 0);
+
+ igt_subtest_f("Q-promotion-%s", e->name)
+ promotion(i915, e->exec_id | e->flags);
+
+ igt_subtest_f("Q-smoketest-%s", e->name)
+ smoketest(i915, e->exec_id | e->flags, 5);
+ }
+ }
+
+ igt_subtest("Q-smoketest-all") {
+ igt_require(gem_scheduler_enabled(i915));
+ igt_require(gem_scheduler_has_ctx_priority(i915));
+ smoketest(i915, -1, 30);
+ }
+
+ igt_fixture {
+ igt_stop_hang_detector();
+ }
+ }
+}
diff --git a/tests/i915/gem_ctx_switch.c b/tests/i915/gem_ctx_switch.c
index 87e13b91..647911d4 100644
--- a/tests/i915/gem_ctx_switch.c
+++ b/tests/i915/gem_ctx_switch.c
@@ -44,7 +44,8 @@
#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)
-#define INTERRUPTIBLE 1
+#define INTERRUPTIBLE 0x1
+#define QUEUE 0x2
static double elapsed(const struct timespec *start, const struct timespec *end)
{
@@ -126,8 +127,12 @@ static void single(int fd, uint32_t handle,
gem_require_ring(fd, e->exec_id | e->flags);
- for (n = 0; n < 64; n++)
- contexts[n] = gem_context_create(fd);
+ for (n = 0; n < 64; n++) {
+ if (flags & QUEUE)
+ contexts[n] = gem_queue_create(fd);
+ else
+ contexts[n] = gem_context_create(fd);
+ }
memset(&obj, 0, sizeof(obj));
obj.handle = handle;
@@ -232,8 +237,12 @@ static void all(int fd, uint32_t handle, unsigned flags, int timeout)
}
igt_require(nengine);
- for (n = 0; n < ARRAY_SIZE(contexts); n++)
- contexts[n] = gem_context_create(fd);
+ for (n = 0; n < ARRAY_SIZE(contexts); n++) {
+ if (flags & QUEUE)
+ contexts[n] = gem_queue_create(fd);
+ else
+ contexts[n] = gem_context_create(fd);
+ }
memset(obj, 0, sizeof(obj));
obj[1].handle = handle;
@@ -298,6 +307,17 @@ igt_main
{
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
const struct intel_execution_engine *e;
+ static const struct {
+ const char *name;
+ unsigned int flags;
+ bool (*require)(int fd);
+ } phases[] = {
+ { "", 0, NULL },
+ { "-interruptible", INTERRUPTIBLE, NULL },
+ { "-queue", QUEUE, gem_has_queues },
+ { "-queue-interruptible", QUEUE | INTERRUPTIBLE, gem_has_queues },
+ { }
+ };
uint32_t light = 0, heavy;
int fd = -1;
@@ -319,21 +339,26 @@ igt_main
}
for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("%s%s", e->exec_id == 0 ? "basic-" : "", e->name)
- single(fd, light, e, 0, 1, 5);
-
- igt_skip_on_simulation();
-
- igt_subtest_f("%s%s-heavy", e->exec_id == 0 ? "basic-" : "", e->name)
- single(fd, heavy, e, 0, 1, 5);
- igt_subtest_f("%s-interruptible", e->name)
- single(fd, light, e, INTERRUPTIBLE, 1, 150);
- igt_subtest_f("forked-%s", e->name)
- single(fd, light, e, 0, ncpus, 150);
- igt_subtest_f("forked-%s-heavy", e->name)
- single(fd, heavy, e, 0, ncpus, 150);
- igt_subtest_f("forked-%s-interruptible", e->name)
- single(fd, light, e, INTERRUPTIBLE, ncpus, 150);
+ for (typeof(*phases) *p = phases; p->name; p++) {
+ igt_subtest_group {
+ igt_fixture {
+ if (p->require)
+ igt_require(p->require(fd));
+ }
+
+ igt_subtest_f("%s%s%s", e->exec_id == 0 ? "basic-" : "", e->name, p->name)
+ single(fd, light, e, p->flags, 1, 5);
+
+ igt_skip_on_simulation();
+
+ igt_subtest_f("%s%s-heavy%s", e->exec_id == 0 ? "basic-" : "", e->name, p->name)
+ single(fd, heavy, e, p->flags, 1, 5);
+ igt_subtest_f("forked-%s%s", e->name, p->name)
+ single(fd, light, e, p->flags, ncpus, 150);
+ igt_subtest_f("forked-%s-heavy%s", e->name, p->name)
+ single(fd, heavy, e, p->flags, ncpus, 150);
+ }
+ }
}
igt_subtest("basic-all-light")
@@ -341,6 +366,16 @@ igt_main
igt_subtest("basic-all-heavy")
all(fd, heavy, 0, 5);
+ igt_subtest_group {
+ igt_fixture {
+ igt_require(gem_has_queues(fd));
+ }
+ igt_subtest("basic-queue-light")
+ all(fd, light, QUEUE, 5);
+ igt_subtest("basic-queue-heavy")
+ all(fd, heavy, QUEUE, 5);
+ }
+
igt_fixture {
igt_stop_hang_detector();
gem_close(fd, heavy);
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
new file mode 100644
index 00000000..b2074486
--- /dev/null
+++ b/tests/i915/gem_exec_balancer.c
@@ -0,0 +1,1332 @@
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <sched.h>
+
+#include "igt.h"
+#include "igt_perf.h"
+#include "i915/gem_ring.h"
+#include "sw_sync.h"
+
+IGT_TEST_DESCRIPTION("Exercise in-kernel load-balancing");
+
+#define INSTANCE_COUNT (1 << I915_PMU_SAMPLE_INSTANCE_BITS)
+
+static size_t sizeof_load_balance(int count)
+{
+ return offsetof(struct i915_context_engines_load_balance,
+ engines[count]);
+}
+
+static size_t sizeof_param_engines(int count)
+{
+ return offsetof(struct i915_context_param_engines,
+ engines[count]);
+}
+
+static size_t sizeof_engines_bond(int count)
+{
+ return offsetof(struct i915_context_engines_bond,
+ engines[count]);
+}
+
+#define alloca0(sz) ({ size_t sz__ = (sz); memset(alloca(sz__), 0, sz__); })
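+
+/*
+ * A short usage sketch: the sizeof_*() helpers above measure structs that
+ * end in a flexible engines[] array, so offsetof(..., engines[count]) is
+ * the byte size needed for 'count' entries, and alloca0() hands back a
+ * zeroed stack allocation of exactly that size, e.g.
+ *
+ *   struct i915_context_param_engines *engines =
+ *           alloca0(sizeof_param_engines(count));
+ */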
+
+static bool has_class_instance(int i915, uint16_t class, uint16_t instance)
+{
+ int fd;
+
+ fd = perf_i915_open(I915_PMU_ENGINE_BUSY(class, instance));
+ if (fd != -1) {
+ close(fd);
+ return true;
+ }
+
+ return false;
+}
+
+static struct i915_engine_class_instance *
+list_engines(int i915, uint32_t class_mask, unsigned int *out)
+{
+ unsigned int count = 0, size = 64;
+ struct i915_engine_class_instance *engines;
+
+ engines = malloc(size * sizeof(*engines));
+ igt_assert(engines);
+
+ for (enum drm_i915_gem_engine_class class = I915_ENGINE_CLASS_RENDER;
+ class_mask;
+ class++, class_mask >>= 1) {
+ if (!(class_mask & 1))
+ continue;
+
+ for (unsigned int instance = 0;
+ instance < INSTANCE_COUNT;
+ instance++) {
+ if (!has_class_instance(i915, class, instance))
+ continue;
+
+ if (count == size) {
+ size *= 2;
+ engines = realloc(engines,
+ size * sizeof(*engines));
+ igt_assert(engines);
+ }
+
+ engines[count++] = (struct i915_engine_class_instance){
+ .engine_class = class,
+ .engine_instance = instance,
+ };
+ }
+ }
+
+ if (!count) {
+ free(engines);
+ engines = NULL;
+ }
+
+ *out = count;
+ return engines;
+}
+
+static int __set_engines(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count)
+{
+ struct i915_context_param_engines *engines =
+ alloca0(sizeof_param_engines(count));
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .size = sizeof_param_engines(count),
+ .value = to_user_pointer(engines)
+ };
+
+ engines->extensions = 0;
+ memcpy(engines->engines, ci, count * sizeof(*ci));
+
+ return __gem_context_set_param(i915, &p);
+}
+
+static void set_engines(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count)
+{
+ igt_assert_eq(__set_engines(i915, ctx, ci, count), 0);
+}
+
+static int __set_load_balancer(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count,
+ void *ext)
+{
+ struct i915_context_engines_load_balance *balancer =
+ alloca0(sizeof_load_balance(count));
+ struct i915_context_param_engines *engines =
+ alloca0(sizeof_param_engines(count + 1));
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .size = sizeof_param_engines(count + 1),
+ .value = to_user_pointer(engines)
+ };
+
+ balancer->base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
+ balancer->base.next_extension = to_user_pointer(ext);
+
+ igt_assert(count);
+ balancer->num_siblings = count;
+ memcpy(balancer->engines, ci, count * sizeof(*ci));
+
+ engines->extensions = to_user_pointer(balancer);
+ engines->engines[0].engine_class =
+ I915_ENGINE_CLASS_INVALID;
+ engines->engines[0].engine_instance =
+ I915_ENGINE_CLASS_INVALID_NONE;
+ memcpy(engines->engines + 1, ci, count * sizeof(*ci));
+
+ return __gem_context_set_param(i915, &p);
+}
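+
+/*
+ * A sketch of the resulting engine map (assuming the slots are used as
+ * laid out above):
+ *
+ *   engines[0]        = { INVALID, INVALID_NONE }, replaced by the
+ *                       load-balancing virtual engine via the extension
+ *   engines[1..count] = the physical siblings, individually addressable
+ *
+ * so execbuf.flags = 0 runs on the balanced engine and flags = n + 1 picks
+ * sibling n directly.
+ */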
+
+static void set_load_balancer(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count,
+ void *ext)
+{
+ igt_assert_eq(__set_load_balancer(i915, ctx, ci, count, ext), 0);
+}
+
+static uint32_t load_balancer_create(int i915,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count)
+{
+ uint32_t ctx;
+
+ ctx = gem_context_create(i915);
+ set_load_balancer(i915, ctx, ci, count, NULL);
+
+ return ctx;
+}
+
+static uint32_t __batch_create(int i915, uint32_t offset)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ uint32_t handle;
+
+ handle = gem_create(i915, ALIGN(offset + 4, 4096));
+ gem_write(i915, handle, offset, &bbe, sizeof(bbe));
+
+ return handle;
+}
+
+static uint32_t batch_create(int i915)
+{
+ return __batch_create(i915, 0);
+}
+
+static void invalid_balancer(int i915)
+{
+ I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balancer, 64);
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 64);
+ struct drm_i915_gem_context_param p = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines)
+ };
+ uint32_t handle;
+ void *ptr;
+
+ /*
+	 * Assume that I915_CONTEXT_PARAM_ENGINES validates the array
+	 * of engines[]; our job is to determine whether the load_balancer
+	 * extension explodes.
+ */
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+
+ ci = list_engines(i915, 1 << class, &count);
+ if (!ci)
+ continue;
+
+ igt_assert_lte(count, 64);
+
+ p.ctx_id = gem_context_create(i915);
+ p.size = (sizeof(struct i915_context_param_engines) +
+ (count + 1) * sizeof(*engines.engines));
+
+ memset(&engines, 0, sizeof(engines));
+ engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
+ engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
+ memcpy(engines.engines + 1, ci, count * sizeof(*ci));
+ gem_context_set_param(i915, &p);
+
+ engines.extensions = -1ull;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ engines.extensions = 1ull;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ memset(&balancer, 0, sizeof(balancer));
+ balancer.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
+ balancer.num_siblings = count;
+ memcpy(balancer.engines, ci, count * sizeof(*ci));
+
+ engines.extensions = to_user_pointer(&balancer);
+ gem_context_set_param(i915, &p);
+
+ balancer.engine_index = 1;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
+
+ balancer.engine_index = count;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
+
+ balancer.engine_index = count + 1;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EINVAL);
+
+ balancer.engine_index = 0;
+ gem_context_set_param(i915, &p);
+
+ balancer.base.next_extension = to_user_pointer(&balancer);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
+
+ balancer.base.next_extension = -1ull;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ handle = gem_create(i915, 4096 * 3);
+ ptr = gem_mmap__gtt(i915, handle, 4096 * 3, PROT_WRITE);
+ gem_close(i915, handle);
+
+ memset(&engines, 0, sizeof(engines));
+ engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
+ engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
+ engines.engines[1].engine_class = I915_ENGINE_CLASS_INVALID;
+ engines.engines[1].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
+		memcpy(engines.engines + 2, ci, count * sizeof(*ci));
+ p.size = (sizeof(struct i915_context_param_engines) +
+ (count + 2) * sizeof(*engines.engines));
+ gem_context_set_param(i915, &p);
+
+ balancer.base.next_extension = 0;
+ balancer.engine_index = 1;
+ engines.extensions = to_user_pointer(&balancer);
+ gem_context_set_param(i915, &p);
+
+ memcpy(ptr + 4096 - 8, &balancer, sizeof(balancer));
+ memcpy(ptr + 8192 - 8, &balancer, sizeof(balancer));
+ balancer.engine_index = 0;
+
+ engines.extensions = to_user_pointer(ptr) + 4096 - 8;
+ gem_context_set_param(i915, &p);
+
+ balancer.base.next_extension = engines.extensions;
+ engines.extensions = to_user_pointer(&balancer);
+ gem_context_set_param(i915, &p);
+
+ munmap(ptr, 4096);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ engines.extensions = to_user_pointer(ptr) + 4096 - 8;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ engines.extensions = to_user_pointer(ptr) + 8192 - 8;
+ gem_context_set_param(i915, &p);
+
+ balancer.base.next_extension = engines.extensions;
+ engines.extensions = to_user_pointer(&balancer);
+ gem_context_set_param(i915, &p);
+
+ munmap(ptr + 8192, 4096);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ engines.extensions = to_user_pointer(ptr) + 8192 - 8;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ munmap(ptr + 4096, 4096);
+
+ gem_context_destroy(i915, p.ctx_id);
+ free(ci);
+ }
+}
+
+static void invalid_bonds(int i915)
+{
+ I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1);
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ uint32_t handle;
+ void *ptr;
+
+ memset(&engines, 0, sizeof(engines));
+ gem_context_set_param(i915, &p);
+
+ memset(bonds, 0, sizeof(bonds));
+ for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+ bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+ bonds[n].base.next_extension =
+ n ? to_user_pointer(&bonds[n - 1]) : 0;
+ bonds[n].num_bonds = 1;
+ }
+ engines.extensions = to_user_pointer(&bonds);
+ gem_context_set_param(i915, &p);
+
+ bonds[0].base.next_extension = -1ull;
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ bonds[0].base.next_extension = to_user_pointer(&bonds[0]);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
+
+ engines.extensions = to_user_pointer(&bonds[1]);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
+ bonds[0].base.next_extension = 0;
+ gem_context_set_param(i915, &p);
+
+ handle = gem_create(i915, 4096 * 3);
+ ptr = gem_mmap__gtt(i915, handle, 4096 * 3, PROT_WRITE);
+ gem_close(i915, handle);
+
+ memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+ engines.extensions = to_user_pointer(ptr) + 4096;
+ gem_context_set_param(i915, &p);
+
+ memcpy(ptr, &bonds[0], sizeof(bonds[0]));
+ bonds[0].base.next_extension = to_user_pointer(ptr);
+ memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+ gem_context_set_param(i915, &p);
+
+ munmap(ptr, 4096);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ bonds[0].base.next_extension = 0;
+ memcpy(ptr + 8192, &bonds[0], sizeof(bonds[0]));
+ bonds[0].base.next_extension = to_user_pointer(ptr) + 8192;
+ memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+ gem_context_set_param(i915, &p);
+
+ munmap(ptr + 8192, 4096);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ munmap(ptr + 4096, 4096);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ gem_context_destroy(i915, p.ctx_id);
+}
+
+static void kick_kthreads(void)
+{
+ usleep(20 * 1000); /* 20ms should be enough for ksoftirqd! */
+}
+
+static double measure_load(int pmu, int period_us)
+{
+ uint64_t data[2];
+ uint64_t d_t, d_v;
+
+ kick_kthreads();
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+ d_v = -data[0];
+ d_t = -data[1];
+
+ usleep(period_us);
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+ d_v += data[0];
+ d_t += data[1];
+
+ return d_v / (double)d_t;
+}
+
+static double measure_min_load(int pmu, unsigned int num, int period_us)
+{
+ uint64_t data[2 + num];
+ uint64_t d_t, d_v[num];
+ uint64_t min = -1, max = 0;
+
+ kick_kthreads();
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+ for (unsigned int n = 0; n < num; n++)
+ d_v[n] = -data[2 + n];
+ d_t = -data[1];
+
+ usleep(period_us);
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+
+ d_t += data[1];
+ for (unsigned int n = 0; n < num; n++) {
+ d_v[n] += data[2 + n];
+ igt_debug("engine[%d]: %.1f%%\n",
+ n, d_v[n] / (double)d_t * 100);
+ if (d_v[n] < min)
+ min = d_v[n];
+ if (d_v[n] > max)
+ max = d_v[n];
+ }
+
+ igt_debug("elapsed: %"PRIu64"ns, load [%.1f, %.1f]%%\n",
+ d_t, min / (double)d_t * 100, max / (double)d_t * 100);
+
+ return min / (double)d_t;
+}
+
+static void measure_all_load(int pmu, double *v, unsigned int num, int period_us)
+{
+ uint64_t data[2 + num];
+ uint64_t d_t, d_v[num];
+
+ kick_kthreads();
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+ for (unsigned int n = 0; n < num; n++)
+ d_v[n] = -data[2 + n];
+ d_t = -data[1];
+
+ usleep(period_us);
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+
+ d_t += data[1];
+ for (unsigned int n = 0; n < num; n++) {
+ d_v[n] += data[2 + n];
+ igt_debug("engine[%d]: %.1f%%\n",
+ n, d_v[n] / (double)d_t * 100);
+ v[n] = d_v[n] / (double)d_t;
+ }
+}
+
+static int add_pmu(int pmu, const struct i915_engine_class_instance *ci)
+{
+ return perf_i915_open_group(I915_PMU_ENGINE_BUSY(ci->engine_class,
+ ci->engine_instance),
+ pmu);
+}
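+
+/*
+ * A note on the pmu plumbing (an assumption about the igt_perf group
+ * semantics): the first add_pmu() call is passed -1 and becomes the group
+ * leader; each later counter joins that group, which is what lets the
+ * measure_*() helpers above fetch every engine's busyness with one read().
+ */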
+
+static const char *class_to_str(int class)
+{
+ const char *str[] = {
+ [I915_ENGINE_CLASS_RENDER] = "rcs",
+ [I915_ENGINE_CLASS_COPY] = "bcs",
+ [I915_ENGINE_CLASS_VIDEO] = "vcs",
+ [I915_ENGINE_CLASS_VIDEO_ENHANCE] = "vecs",
+ };
+
+ if (class < ARRAY_SIZE(str))
+ return str[class];
+
+ return "unk";
+}
+
+static void check_individual_engine(int i915,
+ uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ int idx)
+{
+ igt_spin_t *spin;
+ double load;
+ int pmu;
+
+ pmu = perf_i915_open(I915_PMU_ENGINE_BUSY(ci[idx].engine_class,
+ ci[idx].engine_instance));
+
+ spin = igt_spin_new(i915, .ctx = ctx, .engine = idx + 1);
+ load = measure_load(pmu, 10000);
+ igt_spin_free(i915, spin);
+
+ close(pmu);
+
+ igt_assert_f(load > 0.90,
+ "engine %d (class:instance %d:%d) was found to be only %.1f%% busy\n",
+ idx, ci[idx].engine_class, ci[idx].engine_instance, load*100);
+}
+
+static void individual(int i915)
+{
+ uint32_t ctx;
+
+ /*
+	 * I915_CONTEXT_PARAM_ENGINES allows us to index into the user
+	 * supplied array from gem_execbuf(). Our check is to build the
+	 * ctx->engine[] map with various different engine classes, feed in
+	 * a spinner and then ask the pmu to confirm that the expected
+	 * engine was busy.
+ */
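+	/*
+	 * Concretely (see check_individual_engine() above): with the balancer
+	 * installed, execbuf engine selector idx + 1 is expected to map to
+	 * ci[idx], so a spinner on that index should light up exactly that
+	 * physical engine in the PMU.
+	 */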
+
+ ctx = gem_context_create(i915);
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ for (int pass = 0; pass < count; pass++) { /* approx. count! */
+ igt_assert(sizeof(*ci) == sizeof(int));
+ igt_permute_array(ci, count, igt_exchange_int);
+ set_load_balancer(i915, ctx, ci, count, NULL);
+ for (unsigned int n = 0; n < count; n++)
+ check_individual_engine(i915, ctx, ci, n);
+ }
+
+ free(ci);
+ }
+
+ gem_context_destroy(i915, ctx);
+ gem_quiescent_gpu(i915);
+}
+
+static void bonded(int i915, unsigned int flags)
+#define CORK 0x1
+{
+ I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
+ struct i915_engine_class_instance *master_engines;
+ uint32_t master;
+
+ /*
+	 * I915_CONTEXT_PARAM_ENGINES provides an extension that allows us
+ * to specify which engine(s) to pair with a parallel (EXEC_SUBMIT)
+ * request submitted to another engine.
+ */
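+	/*
+	 * The mechanism below, roughly: a spinner is submitted to the master
+	 * context with IGT_SPIN_FENCE_OUT, and the same batch is resubmitted
+	 * to the bonded (load-balanced) context with I915_EXEC_FENCE_SUBMIT
+	 * pointing at that fence, asking the kernel to start the bonded
+	 * request alongside the master on the sibling named in the bond.
+	 */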
+
+ master = gem_queue_create(i915);
+
+ memset(bonds, 0, sizeof(bonds));
+ for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+ bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+ bonds[n].base.next_extension =
+ n ? to_user_pointer(&bonds[n - 1]) : 0;
+ bonds[n].num_bonds = 1;
+ }
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *siblings;
+ unsigned int count, limit, *order;
+ uint32_t ctx;
+ int n;
+
+ siblings = list_engines(i915, 1u << class, &count);
+ if (!siblings)
+ continue;
+
+ if (count < 2) {
+ free(siblings);
+ continue;
+ }
+
+ master_engines = list_engines(i915, ~(1u << class), &limit);
+ set_engines(i915, master, master_engines, limit);
+
+ limit = min(count, limit);
+ igt_assert(limit <= ARRAY_SIZE(bonds));
+ for (n = 0; n < limit; n++) {
+ bonds[n].master = master_engines[n];
+ bonds[n].engines[0] = siblings[n];
+ }
+
+ ctx = gem_context_clone(i915,
+ master, I915_CONTEXT_CLONE_VM,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+ set_load_balancer(i915, ctx, siblings, count, &bonds[limit - 1]);
+
+ order = malloc(sizeof(*order) * 8 * limit);
+ igt_assert(order);
+ for (n = 0; n < limit; n++)
+ order[2 * limit - n - 1] = order[n] = n % limit;
+ memcpy(order + 2 * limit, order, 2 * limit * sizeof(*order));
+ memcpy(order + 4 * limit, order, 4 * limit * sizeof(*order));
+ igt_permute_array(order + 2 * limit, 6 * limit, igt_exchange_int);
+
+ for (n = 0; n < 8 * limit; n++) {
+ struct drm_i915_gem_execbuffer2 eb;
+ igt_spin_t *spin, *plug;
+ IGT_CORK_HANDLE(cork);
+ double v[limit];
+ int pmu[limit + 1];
+ int bond = order[n];
+
+ pmu[0] = -1;
+ for (int i = 0; i < limit; i++)
+ pmu[i] = add_pmu(pmu[0], &siblings[i]);
+ pmu[limit] = add_pmu(pmu[0], &master_engines[bond]);
+
+ igt_assert(siblings[bond].engine_class !=
+ master_engines[bond].engine_class);
+
+ plug = NULL;
+ if (flags & CORK) {
+ plug = __igt_spin_new(i915,
+ .ctx = master,
+ .engine = bond,
+ .dependency = igt_cork_plug(&cork, i915));
+ }
+
+ spin = __igt_spin_new(i915,
+ .ctx = master,
+ .engine = bond,
+ .flags = IGT_SPIN_FENCE_OUT);
+
+ eb = spin->execbuf;
+ eb.rsvd1 = ctx;
+ eb.rsvd2 = spin->out_fence;
+ eb.flags = I915_EXEC_FENCE_SUBMIT;
+ gem_execbuf(i915, &eb);
+
+ if (plug) {
+ igt_cork_unplug(&cork);
+ igt_spin_free(i915, plug);
+ }
+
+ measure_all_load(pmu[0], v, limit + 1, 10000);
+ igt_spin_free(i915, spin);
+
+ igt_assert_f(v[bond] > 0.90,
+ "engine %d (class:instance %s:%d) was found to be only %.1f%% busy\n",
+ bond,
+ class_to_str(siblings[bond].engine_class),
+ siblings[bond].engine_instance,
+ 100 * v[bond]);
+ for (int other = 0; other < limit; other++) {
+ if (other == bond)
+ continue;
+
+ igt_assert_f(v[other] == 0,
+ "engine %d (class:instance %s:%d) was not idle, and actually %.1f%% busy\n",
+ other,
+ class_to_str(siblings[other].engine_class),
+ siblings[other].engine_instance,
+ 100 * v[other]);
+ }
+ igt_assert_f(v[limit] > 0.90,
+ "master (class:instance %s:%d) was found to be only %.1f%% busy\n",
+ class_to_str(master_engines[bond].engine_class),
+ master_engines[bond].engine_instance,
+ 100 * v[limit]);
+
+ close(pmu[0]);
+ }
+
+ free(order);
+ gem_context_destroy(i915, ctx);
+ free(master_engines);
+ free(siblings);
+ }
+
+ gem_context_destroy(i915, master);
+}
+
+static void indices(int i915)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines)
+ };
+
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+
+ unsigned int nengines = 0;
+ void *balancers = NULL;
+
+ /*
+ * We can populate our engine map with multiple virtual engines.
+ * Do so.
+ */
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ for (int n = 0; n < count; n++) {
+ struct i915_context_engines_load_balance *balancer;
+
+ engines.engines[nengines].engine_class =
+ I915_ENGINE_CLASS_INVALID;
+ engines.engines[nengines].engine_instance =
+ I915_ENGINE_CLASS_INVALID_NONE;
+
+ balancer = calloc(sizeof_load_balance(count), 1);
+ igt_assert(balancer);
+
+ balancer->base.name =
+ I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
+ balancer->base.next_extension =
+ to_user_pointer(balancers);
+ balancers = balancer;
+
+ balancer->engine_index = nengines++;
+ balancer->num_siblings = count;
+
+ memcpy(balancer->engines,
+ ci, count * sizeof(*ci));
+ }
+ free(ci);
+ }
+
+ igt_require(balancers);
+ engines.extensions = to_user_pointer(balancers);
+ p.size = (sizeof(struct i915_engine_class_instance) * nengines +
+ sizeof(struct i915_context_param_engines));
+ gem_context_set_param(i915, &p);
+
+ for (unsigned int n = 0; n < nengines; n++) {
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .flags = n,
+ .rsvd1 = p.ctx_id,
+ };
+ igt_debug("Executing on index=%d\n", n);
+ gem_execbuf(i915, &eb);
+ }
+ gem_context_destroy(i915, p.ctx_id);
+
+ gem_sync(i915, batch.handle);
+ gem_close(i915, batch.handle);
+
+ while (balancers) {
+ struct i915_context_engines_load_balance *b, *n;
+
+ b = balancers;
+ n = from_user_pointer(b->base.next_extension);
+ free(b);
+
+ balancers = n;
+ }
+
+ gem_quiescent_gpu(i915);
+}
+
+static void busy(int i915)
+{
+ uint32_t scratch = gem_create(i915, 4096);
+
+ /*
+ * Check that virtual engines are reported via GEM_BUSY.
+ *
+ * When running, the batch will be on the real engine and report
+ * the actual class.
+ *
+ * Prior to running, if the load-balancer is across multiple
+ * classes we don't know which engine the batch will
+ * execute on, so we report them all!
+ *
+ * However, as we only support (and test) creating a load-balancer
+ * from engines of only one class, that can be propagated accurately
+ * through to GEM_BUSY.
+ */
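+	/*
+	 * For reference, the decoding assumed by the asserts below: the upper
+	 * 16 bits of busy.busy are a bitmask of reading engine classes
+	 * (1u << (class + 16)), and the low 16 bits name the writing engine
+	 * class as class + 1.
+	 */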
+
+ for (int class = 0; class < 16; class++) {
+ struct drm_i915_gem_busy busy;
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+ igt_spin_t *spin[2];
+ uint32_t ctx;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ ctx = load_balancer_create(i915, ci, count);
+ free(ci);
+
+ spin[0] = __igt_spin_new(i915,
+ .ctx = ctx,
+ .flags = IGT_SPIN_POLL_RUN);
+ spin[1] = __igt_spin_new(i915,
+ .ctx = ctx,
+ .dependency = scratch);
+
+ igt_spin_busywait_until_started(spin[0]);
+
+ /* Running: actual class */
+ busy.handle = spin[0]->handle;
+ do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ igt_assert_eq_u32(busy.busy, 1u << (class + 16));
+
+ /* Queued(read): expected class */
+ busy.handle = spin[1]->handle;
+ do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ igt_assert_eq_u32(busy.busy, 1u << (class + 16));
+
+ /* Queued(write): expected class */
+ busy.handle = scratch;
+ do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ igt_assert_eq_u32(busy.busy,
+ (1u << (class + 16)) | (class + 1));
+
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
+
+ gem_context_destroy(i915, ctx);
+ }
+
+ gem_close(i915, scratch);
+ gem_quiescent_gpu(i915);
+}
+
+static void full(int i915, unsigned int flags)
+#define PULSE 0x1
+#define LATE 0x2
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+
+ if (flags & LATE)
+ igt_require_sw_sync();
+
+ /*
+	 * I915_CONTEXT_PARAM_ENGINES changes the meaning of the engine
+	 * selector in execbuf so that it indexes our own map, in which
+	 * I915_EXEC_DEFAULT is replaced by a virtual engine that selects
+	 * automatically from the other ctx->engine[] entries. It employs
+	 * load-balancing to evenly distribute the workload across the
+	 * array. If we submit N spinners, we expect them to run
+	 * simultaneously across N engines and use the PMU to confirm that
+	 * the entire set of engines is busy.
+ *
+ * We complicate matters by interspersing short-lived tasks to
+ * challenge the kernel to search for space in which to insert new
+ * batches.
+ */
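+	/*
+	 * With PULSE we also feed in a trivial nop batch per iteration, and
+	 * with LATE everything is additionally held back behind a sw_sync
+	 * fence until all submissions are queued, as sketched by the flags
+	 * handling below.
+	 */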
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ igt_spin_t *spin = NULL;
+ IGT_CORK_FENCE(cork);
+ unsigned int count;
+ double load;
+ int fence = -1;
+ int *pmu;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ pmu = malloc(sizeof(*pmu) * count);
+ igt_assert(pmu);
+
+ if (flags & LATE)
+ fence = igt_cork_plug(&cork, i915);
+
+ pmu[0] = -1;
+ for (unsigned int n = 0; n < count; n++) {
+ uint32_t ctx;
+
+ pmu[n] = add_pmu(pmu[0], &ci[n]);
+
+ if (flags & PULSE) {
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .rsvd2 = fence,
+ .flags = flags & LATE ? I915_EXEC_FENCE_IN : 0,
+ };
+ gem_execbuf(i915, &eb);
+ }
+
+ /*
+			 * Each spinner needs to be on a new timeline,
+ * otherwise they will just sit in the single queue
+ * and not run concurrently.
+ */
+ ctx = load_balancer_create(i915, ci, count);
+
+ if (spin == NULL) {
+ spin = __igt_spin_new(i915, .ctx = ctx);
+ } else {
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = spin->execbuf.buffers_ptr,
+ .buffer_count = spin->execbuf.buffer_count,
+ .rsvd1 = ctx,
+ .rsvd2 = fence,
+ .flags = flags & LATE ? I915_EXEC_FENCE_IN : 0,
+ };
+ gem_execbuf(i915, &eb);
+ }
+
+ gem_context_destroy(i915, ctx);
+ }
+
+ if (flags & LATE) {
+ igt_cork_unplug(&cork);
+ close(fence);
+ }
+
+ load = measure_min_load(pmu[0], count, 10000);
+ igt_spin_free(i915, spin);
+
+ close(pmu[0]);
+ free(pmu);
+
+ free(ci);
+
+ igt_assert_f(load > 0.90,
+ "minimum load for %d x class:%d was found to be only %.1f%% busy\n",
+ count, class, load*100);
+ gem_quiescent_gpu(i915);
+ }
+
+ gem_close(i915, batch.handle);
+ gem_quiescent_gpu(i915);
+}
+
+static void nop(int i915)
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+ uint32_t ctx;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ ctx = load_balancer_create(i915, ci, count);
+
+ for (int n = 0; n < count; n++) {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .flags = n + 1,
+ .rsvd1 = ctx,
+ };
+ struct timespec tv = {};
+ unsigned long nops;
+ double t;
+
+ igt_nsec_elapsed(&tv);
+ nops = 0;
+ do {
+ for (int r = 0; r < 1024; r++)
+ gem_execbuf(i915, &execbuf);
+ nops += 1024;
+ } while (igt_seconds_elapsed(&tv) < 2);
+ gem_sync(i915, batch.handle);
+
+ t = igt_nsec_elapsed(&tv) * 1e-3 / nops;
+ igt_info("%s:%d %.3fus\n", class_to_str(class), n, t);
+ }
+
+ {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .rsvd1 = ctx,
+ };
+ struct timespec tv = {};
+ unsigned long nops;
+ double t;
+
+ igt_nsec_elapsed(&tv);
+ nops = 0;
+ do {
+ for (int r = 0; r < 1024; r++)
+ gem_execbuf(i915, &execbuf);
+ nops += 1024;
+ } while (igt_seconds_elapsed(&tv) < 2);
+ gem_sync(i915, batch.handle);
+
+ t = igt_nsec_elapsed(&tv) * 1e-3 / nops;
+ igt_info("%s:* %.3fus\n", class_to_str(class), t);
+ }
+
+
+ igt_fork(child, count) {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .flags = child + 1,
+ .rsvd1 = gem_context_clone(i915, ctx,
+ I915_CONTEXT_CLONE_ENGINES, 0),
+ };
+ struct timespec tv = {};
+ unsigned long nops;
+ double t;
+
+ igt_nsec_elapsed(&tv);
+ nops = 0;
+ do {
+ for (int r = 0; r < 1024; r++)
+ gem_execbuf(i915, &execbuf);
+ nops += 1024;
+ } while (igt_seconds_elapsed(&tv) < 2);
+ gem_sync(i915, batch.handle);
+
+ t = igt_nsec_elapsed(&tv) * 1e-3 / nops;
+ igt_info("[%d] %s:%d %.3fus\n",
+ child, class_to_str(class), child, t);
+
+ memset(&tv, 0, sizeof(tv));
+ execbuf.flags = 0;
+
+ igt_nsec_elapsed(&tv);
+ nops = 0;
+ do {
+ for (int r = 0; r < 1024; r++)
+ gem_execbuf(i915, &execbuf);
+ nops += 1024;
+ } while (igt_seconds_elapsed(&tv) < 2);
+ gem_sync(i915, batch.handle);
+
+ t = igt_nsec_elapsed(&tv) * 1e-3 / nops;
+ igt_info("[%d] %s:* %.3fus\n",
+ child, class_to_str(class), t);
+
+ gem_context_destroy(i915, execbuf.rsvd1);
+ }
+
+ igt_waitchildren();
+
+ gem_context_destroy(i915, ctx);
+ free(ci);
+ }
+
+ gem_close(i915, batch.handle);
+ gem_quiescent_gpu(i915);
+}
+
+static void ping(int i915, uint32_t ctx, unsigned int engine)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .flags = engine,
+ .rsvd1 = ctx,
+ };
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, obj.handle);
+ gem_close(i915, obj.handle);
+}
+
+static void semaphore(int i915)
+{
+ uint32_t block[2], scratch;
+ igt_spin_t *spin[3];
+
+ /*
+ * If we are using HW semaphores to launch serialised requests
+	 * on different engines concurrently, we want to verify that real
+ * work is unimpeded.
+ */
+ igt_require(gem_scheduler_has_preemption(i915));
+
+ block[0] = gem_context_create(i915);
+ block[1] = gem_context_create(i915);
+
+ scratch = gem_create(i915, 4096);
+ spin[2] = igt_spin_new(i915, .dependency = scratch);
+ for (int class = 1; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+ uint32_t vip;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+		if (count < ARRAY_SIZE(block)) {
+			free(ci);
+			continue;
+		}
+
+ /* Ensure that we completely occupy all engines in this group */
+ count = ARRAY_SIZE(block);
+
+ for (int i = 0; i < count; i++) {
+ set_load_balancer(i915, block[i], ci, count, NULL);
+ spin[i] = __igt_spin_new(i915,
+ .ctx = block[i],
+ .dependency = scratch);
+ }
+
+ /*
+ * Either we haven't blocked both engines with semaphores,
+ * or we let the vip through. If not, we hang.
+ */
+ vip = gem_context_create(i915);
+ set_load_balancer(i915, vip, ci, count, NULL);
+ ping(i915, vip, 0);
+ gem_context_destroy(i915, vip);
+
+ for (int i = 0; i < count; i++)
+ igt_spin_free(i915, spin[i]);
+
+ free(ci);
+ }
+ igt_spin_free(i915, spin[2]);
+ gem_close(i915, scratch);
+
+ gem_context_destroy(i915, block[1]);
+ gem_context_destroy(i915, block[0]);
+
+ gem_quiescent_gpu(i915);
+}
+
+static void smoketest(int i915, int timeout)
+{
+ struct drm_i915_gem_exec_object2 batch[2] = {
+ { .handle = __batch_create(i915, 16380) }
+ };
+ unsigned int ncontext = 0;
+ uint32_t *contexts = NULL;
+ uint32_t *handles = NULL;
+
+ igt_require_sw_sync();
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count = 0;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci || count < 2) {
+ free(ci);
+ continue;
+ }
+
+ ncontext += 128;
+ contexts = realloc(contexts, sizeof(*contexts) * ncontext);
+ igt_assert(contexts);
+
+ for (unsigned int n = ncontext - 128; n < ncontext; n++) {
+ contexts[n] = load_balancer_create(i915, ci, count);
+ igt_assert(contexts[n]);
+ }
+
+ free(ci);
+ }
+ igt_debug("Created %d virtual engines (one per context)\n", ncontext);
+ igt_require(ncontext);
+
+ contexts = realloc(contexts, sizeof(*contexts) * ncontext * 4);
+ igt_assert(contexts);
+ memcpy(contexts + ncontext, contexts, ncontext * sizeof(*contexts));
+ ncontext *= 2;
+ memcpy(contexts + ncontext, contexts, ncontext * sizeof(*contexts));
+ ncontext *= 2;
+
+ handles = malloc(sizeof(*handles) * ncontext);
+ igt_assert(handles);
+ for (unsigned int n = 0; n < ncontext; n++)
+ handles[n] = gem_create(i915, 4096);
+
+ igt_until_timeout(timeout) {
+ unsigned int count = 1 + (rand() % (ncontext - 1));
+ IGT_CORK_FENCE(cork);
+ int fence = igt_cork_plug(&cork, i915);
+
+ for (unsigned int n = 0; n < count; n++) {
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(batch),
+ .buffer_count = ARRAY_SIZE(batch),
+ .rsvd1 = contexts[n],
+ .rsvd2 = fence,
+ .flags = I915_EXEC_BATCH_FIRST | I915_EXEC_FENCE_IN,
+ };
+ batch[1].handle = handles[n];
+ gem_execbuf(i915, &eb);
+ }
+ igt_permute_array(handles, count, igt_exchange_int);
+
+ igt_cork_unplug(&cork);
+ for (unsigned int n = 0; n < count; n++)
+ gem_sync(i915, handles[n]);
+
+ close(fence);
+ }
+
+ for (unsigned int n = 0; n < ncontext; n++) {
+ gem_close(i915, handles[n]);
+ __gem_context_destroy(i915, contexts[n]);
+ }
+ free(handles);
+ free(contexts);
+ gem_close(i915, batch[0].handle);
+}
+
+static bool has_context_engines(int i915)
+{
+ struct drm_i915_gem_context_param p = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ };
+
+ return __gem_context_set_param(i915, &p) == 0;
+}
+
+static bool has_load_balancer(int i915)
+{
+ struct i915_engine_class_instance ci = {};
+ uint32_t ctx;
+ int err;
+
+ ctx = gem_context_create(i915);
+ err = __set_load_balancer(i915, ctx, &ci, 1, NULL);
+ gem_context_destroy(i915, ctx);
+
+ return err == 0;
+}
+
+igt_main
+{
+ int i915 = -1;
+
+ igt_skip_on_simulation();
+
+ igt_fixture {
+ i915 = drm_open_driver(DRIVER_INTEL);
+ igt_require_gem(i915);
+
+ gem_require_contexts(i915);
+ igt_require(has_context_engines(i915));
+ igt_require(has_load_balancer(i915));
+
+ igt_fork_hang_detector(i915);
+ }
+
+ igt_subtest("invalid-balancer")
+ invalid_balancer(i915);
+
+ igt_subtest("invalid-bonds")
+ invalid_bonds(i915);
+
+ igt_subtest("individual")
+ individual(i915);
+
+ igt_subtest("indices")
+ indices(i915);
+
+ igt_subtest("busy")
+ busy(i915);
+
+ igt_subtest_group {
+ static const struct {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "", 0 },
+ { "-pulse", PULSE },
+ { "-late", LATE },
+ { "-late-pulse", PULSE | LATE },
+ { }
+ };
+ for (typeof(*phases) *p = phases; p->name; p++)
+ igt_subtest_f("full%s", p->name)
+ full(i915, p->flags);
+ }
+
+ igt_subtest("nop")
+ nop(i915);
+
+ igt_subtest("semaphore")
+ semaphore(i915);
+
+ igt_subtest("smoke")
+ smoketest(i915, 20);
+
+ igt_subtest("bonded-imm")
+ bonded(i915, 0);
+
+ igt_subtest("bonded-cork")
+ bonded(i915, CORK);
+
+ igt_fixture {
+ igt_stop_hang_detector();
+ }
+}
diff --git a/tests/i915/gem_exec_basic.c b/tests/i915/gem_exec_basic.c
index dcb83864..1287860b 100644
--- a/tests/i915/gem_exec_basic.c
+++ b/tests/i915/gem_exec_basic.c
@@ -42,12 +42,12 @@ static void batch_fini(int fd, uint32_t handle)
gem_close(fd, handle);
}
-static void noop(int fd, unsigned ring)
+static void noop(int fd, uint64_t flags)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 exec;
- gem_require_ring(fd, ring);
+ gem_require_ring(fd, flags);
memset(&exec, 0, sizeof(exec));
@@ -56,18 +56,18 @@ static void noop(int fd, unsigned ring)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&exec);
execbuf.buffer_count = 1;
- execbuf.flags = ring;
+ execbuf.flags = flags;
gem_execbuf(fd, &execbuf);
batch_fini(fd, exec.handle);
}
-static void readonly(int fd, unsigned ring)
+static void readonly(int fd, uint64_t flags)
{
struct drm_i915_gem_execbuffer2 *execbuf;
struct drm_i915_gem_exec_object2 exec;
- gem_require_ring(fd, ring);
+ gem_require_ring(fd, flags);
memset(&exec, 0, sizeof(exec));
exec.handle = batch_create(fd);
@@ -77,7 +77,7 @@ static void readonly(int fd, unsigned ring)
execbuf->buffers_ptr = to_user_pointer(&exec);
execbuf->buffer_count = 1;
- execbuf->flags = ring;
+ execbuf->flags = flags;
igt_assert(mprotect(execbuf, 4096, PROT_READ) == 0);
gem_execbuf(fd, execbuf);
@@ -87,13 +87,13 @@ static void readonly(int fd, unsigned ring)
batch_fini(fd, exec.handle);
}
-static void gtt(int fd, unsigned ring)
+static void gtt(int fd, uint64_t flags)
{
struct drm_i915_gem_execbuffer2 *execbuf;
struct drm_i915_gem_exec_object2 *exec;
uint32_t handle;
- gem_require_ring(fd, ring);
+ gem_require_ring(fd, flags);
handle = gem_create(fd, 4096);
@@ -106,7 +106,7 @@ static void gtt(int fd, unsigned ring)
execbuf->buffers_ptr = to_user_pointer(exec);
execbuf->buffer_count = 1;
- execbuf->flags = ring;
+ execbuf->flags = flags;
gem_execbuf(fd, execbuf);
@@ -114,9 +114,33 @@ static void gtt(int fd, unsigned ring)
munmap(execbuf, 4096);
}
+static void all(int i915)
+{
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e)
+ noop(i915, e->flags);
+}
+
+static void readonly_all(int i915)
+{
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e)
+ readonly(i915, e->flags);
+}
+
+static void gtt_all(int i915)
+{
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e)
+ gtt(i915, e->flags);
+}
+
igt_main
{
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
int fd = -1;
igt_fixture {
@@ -126,13 +150,22 @@ igt_main
igt_fork_hang_detector(fd);
}
- for (e = intel_execution_engines; e->name; e++) {
+ igt_subtest("basic-all")
+ all(fd);
+
+ igt_subtest("readonly-all")
+ readonly_all(fd);
+
+ igt_subtest("gtt-all")
+ gtt_all(fd);
+
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("basic-%s", e->name)
- noop(fd, e->exec_id | e->flags);
+ noop(fd, e->flags);
igt_subtest_f("readonly-%s", e->name)
- readonly(fd, e->exec_id | e->flags);
+ readonly(fd, e->flags);
igt_subtest_f("gtt-%s", e->name)
- gtt(fd, e->exec_id | e->flags);
+ gtt(fd, e->flags);
}
igt_fixture {
diff --git a/tests/i915/gem_exec_blt.c b/tests/i915/gem_exec_blt.c
index 00926e55..94de1a82 100644
--- a/tests/i915/gem_exec_blt.c
+++ b/tests/i915/gem_exec_blt.c
@@ -293,7 +293,7 @@ static void set_max_freq(int sysfs)
}
-int main(int argc, char **argv)
+igt_main
{
const struct {
const char *suffix;
@@ -307,8 +307,6 @@ int main(int argc, char **argv)
int min = -1, max = -1;
int fd, sysfs;
- igt_subtest_init(argc, argv);
-
igt_skip_on_simulation();
igt_fixture {
@@ -344,6 +342,4 @@ int main(int argc, char **argv)
close(sysfs);
close(fd);
}
-
- igt_exit();
}
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index 8120f8b5..0befb54f 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -317,68 +317,22 @@ static void test_fence_busy_all(int fd, unsigned flags)
static void test_fence_await(int fd, unsigned ring, unsigned flags)
{
- const int gen = intel_gen(intel_get_drm_devid(fd));
- struct drm_i915_gem_exec_object2 obj;
- struct drm_i915_gem_relocation_entry reloc;
- struct drm_i915_gem_execbuffer2 execbuf;
uint32_t scratch = gem_create(fd, 4096);
- uint32_t *batch, *out;
+ igt_spin_t *spin;
unsigned engine;
- int fence, i;
+ uint32_t *out;
+ int i;
igt_require(gem_can_store_dword(fd, 0));
- memset(&execbuf, 0, sizeof(execbuf));
- execbuf.buffers_ptr = to_user_pointer(&obj);
- execbuf.buffer_count = 1;
- execbuf.flags = ring | LOCAL_EXEC_FENCE_OUT;
-
- memset(&obj, 0, sizeof(obj));
- obj.handle = gem_create(fd, 4096);
-
- obj.relocs_ptr = to_user_pointer(&reloc);
- obj.relocation_count = 1;
- memset(&reloc, 0, sizeof(reloc));
-
out = gem_mmap__wc(fd, scratch, 0, 4096, PROT_WRITE);
- gem_set_domain(fd, obj.handle,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-
- batch = gem_mmap__wc(fd, obj.handle, 0, 4096, PROT_WRITE);
- gem_set_domain(fd, obj.handle,
+ gem_set_domain(fd, scratch,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- reloc.target_handle = obj.handle; /* recurse */
- reloc.presumed_offset = 0;
- reloc.offset = sizeof(uint32_t);
- reloc.delta = 0;
- reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
- reloc.write_domain = 0;
-
- i = 0;
- batch[i] = MI_BATCH_BUFFER_START;
- if (gen >= 8) {
- batch[i] |= 1 << 8 | 1;
- batch[++i] = 0;
- batch[++i] = 0;
- } else if (gen >= 6) {
- batch[i] |= 1 << 8;
- batch[++i] = 0;
- } else {
- batch[i] |= 2 << 6;
- batch[++i] = 0;
- if (gen < 4) {
- batch[i] |= 1;
- reloc.delta = 1;
- }
- }
- i++;
-
- execbuf.rsvd2 = -1;
- gem_execbuf_wr(fd, &execbuf);
- gem_close(fd, obj.handle);
- fence = execbuf.rsvd2 >> 32;
- igt_assert(fence != -1);
+ spin = igt_spin_new(fd,
+ .engine = ring,
+ .flags = IGT_SPIN_FENCE_OUT);
+ igt_assert(spin->out_fence != -1);
i = 0;
for_each_physical_engine(fd, engine) {
@@ -386,27 +340,24 @@ static void test_fence_await(int fd, unsigned ring, unsigned flags)
continue;
if (flags & NONBLOCK) {
- store(fd, engine, fence, scratch, i);
+ store(fd, engine, spin->out_fence, scratch, i);
} else {
igt_fork(child, 1)
- store(fd, engine, fence, scratch, i);
+ store(fd, engine, spin->out_fence, scratch, i);
}
i++;
}
- close(fence);
sleep(1);
/* Check for invalidly completing the task early */
+ igt_assert(fence_busy(spin->out_fence));
for (int n = 0; n < i; n++)
igt_assert_eq_u32(out[n], 0);
- if ((flags & HANG) == 0) {
- *batch = MI_BATCH_BUFFER_END;
- __sync_synchronize();
- }
- munmap(batch, 4096);
+ if ((flags & HANG) == 0)
+ igt_spin_end(spin);
igt_waitchildren();
@@ -414,6 +365,8 @@ static void test_fence_await(int fd, unsigned ring, unsigned flags)
while (i--)
igt_assert_eq_u32(out[i], i);
munmap(out, 4096);
+
+ igt_spin_free(fd, spin);
gem_close(fd, scratch);
}
diff --git a/tests/i915/gem_exec_parallel.c b/tests/i915/gem_exec_parallel.c
index 7597befb..8a4482e5 100644
--- a/tests/i915/gem_exec_parallel.c
+++ b/tests/i915/gem_exec_parallel.c
@@ -149,7 +149,7 @@ static void *thread(void *data)
return NULL;
}
-static void all(int fd, unsigned engine, unsigned flags)
+static void all(int fd, struct intel_execution_engine2 *engine, unsigned flags)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
pthread_mutex_t mutex;
@@ -170,15 +170,15 @@ static void all(int fd, unsigned engine, unsigned flags)
}
nengine = 0;
- if (engine == ALL_ENGINES) {
- for_each_physical_engine(fd, engine) {
- if (gem_can_store_dword(fd, engine))
- engines[nengine++] = engine;
+ if (!engine) {
+ struct intel_execution_engine2 *e;
+ __for_each_physical_engine(fd, e) {
+ if (gem_class_can_store_dword(fd, e->class))
+ engines[nengine++] = e->flags;
}
} else {
- igt_require(gem_has_ring(fd, engine));
- igt_require(gem_can_store_dword(fd, engine));
- engines[nengine++] = engine;
+ igt_require(gem_class_can_store_dword(fd, engine->class));
+ engines[nengine++] = engine->flags;
}
igt_require(nengine);
@@ -229,6 +229,8 @@ static void all(int fd, unsigned engine, unsigned flags)
igt_main
{
+ struct intel_execution_engine2 *e;
+
const struct mode {
const char *name;
unsigned flags;
@@ -249,16 +251,16 @@ igt_main
for (const struct mode *m = modes; m->name; m++)
igt_subtest_f("%s", *m->name ? m->name : "basic")
- all(fd, ALL_ENGINES, m->flags);
+ /* NULL value means all engines */
+ all(fd, NULL, m->flags);
- for (const struct intel_execution_engine *e = intel_execution_engines;
- e->name; e++) {
+ __for_each_physical_engine(fd, e) {
for (const struct mode *m = modes; m->name; m++)
igt_subtest_f("%s%s%s",
e->name,
*m->name ? "-" : "",
m->name)
- all(fd, e->exec_id | e->flags, m->flags);
+ all(fd, e, m->flags);
}
igt_fixture {
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index 330e8a54..7b418622 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -234,7 +234,7 @@ static void independent(int fd, unsigned int engine)
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
+ .buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
.flags = other,
};
gem_execbuf(fd, &eb);
@@ -507,6 +507,7 @@ static void semaphore_resolve(int i915)
uint32_t handle, cancel;
uint32_t *cs, *map;
igt_spin_t *spin;
+ int64_t poke = 1;
if (!gem_can_store_dword(i915, engine))
continue;
@@ -536,7 +537,8 @@ static void semaphore_resolve(int i915)
/* Then cancel the spinner */
*cs++ = MI_STORE_DWORD_IMM;
- *cs++ = spin->obj[1].offset + offset_in_page(spin->condition);
+ *cs++ = spin->obj[IGT_SPIN_BATCH].offset +
+ offset_in_page(spin->condition);
*cs++ = 0;
*cs++ = MI_BATCH_BUFFER_END;
@@ -547,7 +549,7 @@ static void semaphore_resolve(int i915)
/* First up is our spinning semaphore */
memset(obj, 0, sizeof(obj));
- obj[0] = spin->obj[1];
+ obj[0] = spin->obj[IGT_SPIN_BATCH];
obj[1].handle = semaphore;
obj[1].offset = SEMAPHORE_ADDR;
obj[1].flags = EXEC_OBJECT_PINNED;
@@ -561,7 +563,7 @@ static void semaphore_resolve(int i915)
memset(obj, 0, sizeof(obj));
obj[0].handle = handle;
obj[0].flags = EXEC_OBJECT_WRITE; /* always after semaphore */
- obj[1] = spin->obj[1];
+ obj[1] = spin->obj[IGT_SPIN_BATCH];
eb.buffer_count = 2;
eb.rsvd1 = 0;
gem_execbuf(i915, &eb);
@@ -587,6 +589,7 @@ static void semaphore_resolve(int i915)
eb.buffer_count = 2;
eb.rsvd1 = inner;
gem_execbuf(i915, &eb);
+ gem_wait(i915, cancel, &poke); /* match sync's WAIT_PRIORITY */
gem_close(i915, cancel);
gem_sync(i915, handle); /* To hang unless cancel runs! */
@@ -636,11 +639,13 @@ static void semaphore_noskip(int i915)
/* Cancel the following spinner */
*cs++ = MI_STORE_DWORD_IMM;
if (gen >= 8) {
- *cs++ = spin->obj[1].offset + offset_in_page(spin->condition);
+ *cs++ = spin->obj[IGT_SPIN_BATCH].offset +
+ offset_in_page(spin->condition);
*cs++ = 0;
} else {
*cs++ = 0;
- *cs++ = spin->obj[1].offset + offset_in_page(spin->condition);
+ *cs++ = spin->obj[IGT_SPIN_BATCH].offset +
+ offset_in_page(spin->condition);
}
*cs++ = MI_BATCH_BUFFER_END;
@@ -649,9 +654,9 @@ static void semaphore_noskip(int i915)
/* port0: implicit semaphore from engine */
memset(obj, 0, sizeof(obj));
- obj[0] = chain->obj[1];
+ obj[0] = chain->obj[IGT_SPIN_BATCH];
obj[0].flags |= EXEC_OBJECT_WRITE;
- obj[1] = spin->obj[1];
+ obj[1] = spin->obj[IGT_SPIN_BATCH];
obj[2].handle = handle;
memset(&eb, 0, sizeof(eb));
eb.buffer_count = 3;
@@ -664,7 +669,7 @@ static void semaphore_noskip(int i915)
memset(obj, 0, sizeof(obj));
obj[0].handle = handle;
obj[0].flags = EXEC_OBJECT_WRITE;
- obj[1] = spin->obj[1];
+ obj[1] = spin->obj[IGT_SPIN_BATCH];
memset(&eb, 0, sizeof(eb));
eb.buffer_count = 2;
eb.buffers_ptr = to_user_pointer(obj);
@@ -840,7 +845,7 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
+ .buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
.rsvd1 = ctx,
.flags = other,
};
diff --git a/tests/i915/gem_exec_store.c b/tests/i915/gem_exec_store.c
index a7673489..b74c3d84 100644
--- a/tests/i915/gem_exec_store.c
+++ b/tests/i915/gem_exec_store.c
@@ -37,7 +37,7 @@
#define ENGINE_MASK (I915_EXEC_RING_MASK | LOCAL_I915_EXEC_BSD_MASK)
-static void store_dword(int fd, unsigned ring)
+static void store_dword(int fd, const struct intel_execution_engine2 *e)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[2];
@@ -46,14 +46,13 @@ static void store_dword(int fd, unsigned ring)
uint32_t batch[16];
int i;
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
intel_detect_and_clear_missed_interrupts(fd);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
if (gen > 3 && gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -97,7 +96,8 @@ static void store_dword(int fd, unsigned ring)
}
#define PAGES 1
-static void store_cachelines(int fd, unsigned ring, unsigned int flags)
+static void store_cachelines(int fd, const struct intel_execution_engine2 *e,
+ unsigned int flags)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 *obj;
@@ -110,13 +110,12 @@ static void store_cachelines(int fd, unsigned ring, unsigned int flags)
reloc = calloc(NCACHELINES, sizeof(*reloc));
igt_assert(reloc);
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
intel_detect_and_clear_missed_interrupts(fd);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffer_count = flags & PAGES ? NCACHELINES + 1 : 2;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
if (gen > 3 && gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -180,12 +179,13 @@ static void store_all(int fd)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[2];
+ struct intel_execution_engine2 *engine;
struct drm_i915_gem_relocation_entry reloc[32];
struct drm_i915_gem_execbuffer2 execbuf;
unsigned engines[16], permuted[16];
uint32_t batch[16];
uint64_t offset;
- unsigned engine, nengine;
+ unsigned nengine;
int value;
int i, j;
@@ -220,14 +220,14 @@ static void store_all(int fd)
nengine = 0;
intel_detect_and_clear_missed_interrupts(fd);
- for_each_physical_engine(fd, engine) {
- if (!gem_can_store_dword(fd, engine))
+ __for_each_physical_engine(fd, engine) {
+ if (!gem_class_can_store_dword(fd, engine->class))
continue;
igt_assert(2*(nengine+1)*sizeof(batch) <= 4096);
execbuf.flags &= ~ENGINE_MASK;
- execbuf.flags |= engine;
+ execbuf.flags |= engine->flags;
j = 2*nengine;
reloc[j].target_handle = obj[0].handle;
@@ -259,7 +259,7 @@ static void store_all(int fd)
execbuf.batch_start_offset = j*sizeof(batch);
gem_execbuf(fd, &execbuf);
- engines[nengine++] = engine;
+ engines[nengine++] = engine->flags;
}
gem_sync(fd, obj[1].handle);
@@ -311,7 +311,7 @@ static int print_welcome(int fd)
igt_main
{
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
int fd;
igt_fixture {
@@ -329,15 +329,15 @@ igt_main
igt_fork_hang_detector(fd);
}
- for (e = intel_execution_engines; e->name; e++) {
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("basic-%s", e->name)
- store_dword(fd, e->exec_id | e->flags);
+ store_dword(fd, e);
igt_subtest_f("cachelines-%s", e->name)
- store_cachelines(fd, e->exec_id | e->flags, 0);
+ store_cachelines(fd, e, 0);
igt_subtest_f("pages-%s", e->name)
- store_cachelines(fd, e->exec_id | e->flags, PAGES);
+ store_cachelines(fd, e, PAGES);
}
igt_subtest("basic-all")
diff --git a/tests/i915/gem_exec_whisper.c b/tests/i915/gem_exec_whisper.c
index 6c3b5375..de7a14da 100644
--- a/tests/i915/gem_exec_whisper.c
+++ b/tests/i915/gem_exec_whisper.c
@@ -44,15 +44,6 @@
#define VERIFY 0
-static void write_seqno(int dir, unsigned offset)
-{
- uint32_t seqno = UINT32_MAX - offset;
-
- igt_sysfs_printf(dir, "i915_next_seqno", "0x%x", seqno);
-
- igt_debug("next seqno set to: 0x%x\n", seqno);
-}
-
static void check_bo(int fd, uint32_t handle, int pass)
{
uint32_t *map;
@@ -87,6 +78,8 @@ static void verify_reloc(int fd, uint32_t handle,
#define HANG 0x20
#define SYNC 0x40
#define PRIORITY 0x80
+#define ALL 0x100
+#define QUEUES 0x200
struct hang {
struct drm_i915_gem_exec_object2 obj;
@@ -171,7 +164,7 @@ static void ctx_set_random_priority(int fd, uint32_t ctx)
{
int prio = hars_petruska_f54_1_random_unsafe_max(1024) - 512;
gem_context_set_priority(fd, ctx, prio);
-};
+}
static void whisper(int fd, unsigned engine, unsigned flags)
{
@@ -198,6 +191,7 @@ static void whisper(int fd, unsigned engine, unsigned flags)
uint64_t old_offset;
int i, n, loc;
int debugfs;
+ int nchild;
if (flags & PRIORITY) {
igt_require(gem_scheduler_enabled(fd));
@@ -214,6 +208,7 @@ static void whisper(int fd, unsigned engine, unsigned flags)
engines[nengine++] = engine;
}
} else {
+ igt_assert(!(flags & ALL));
igt_require(gem_has_ring(fd, engine));
igt_require(gem_can_store_dword(fd, engine));
engines[nengine++] = engine;
@@ -226,14 +221,28 @@ static void whisper(int fd, unsigned engine, unsigned flags)
if (flags & CONTEXTS)
gem_require_contexts(fd);
+ if (flags & QUEUES)
+ igt_require(gem_has_queues(fd));
+
if (flags & HANG)
init_hang(&hang);
+ nchild = 1;
+ if (flags & FORKED)
+ nchild *= sysconf(_SC_NPROCESSORS_ONLN);
+ if (flags & ALL)
+ nchild *= nengine;
+
intel_detect_and_clear_missed_interrupts(fd);
gpu_power_read(&power, &sample[0]);
- igt_fork(child, flags & FORKED ? sysconf(_SC_NPROCESSORS_ONLN) : 1) {
+ igt_fork(child, nchild) {
unsigned int pass;
+ if (flags & ALL) {
+ engines[0] = engines[child % nengine];
+ nengine = 1;
+ }
+
memset(&scratch, 0, sizeof(scratch));
scratch.handle = gem_create(fd, 4096);
scratch.flags = EXEC_OBJECT_WRITE;
@@ -290,6 +299,10 @@ static void whisper(int fd, unsigned engine, unsigned flags)
for (n = 0; n < 64; n++)
contexts[n] = gem_context_create(fd);
}
+ if (flags & QUEUES) {
+ for (n = 0; n < 64; n++)
+ contexts[n] = gem_queue_create(fd);
+ }
if (flags & FDS) {
for (n = 0; n < 64; n++)
fds[n] = drm_open_driver(DRIVER_INTEL);
@@ -333,9 +346,6 @@ static void whisper(int fd, unsigned engine, unsigned flags)
igt_until_timeout(150) {
uint64_t offset;
- if (!(flags & FORKED))
- write_seqno(debugfs, pass);
-
if (flags & HANG)
submit_hang(&hang, engines, nengine, flags);
@@ -374,8 +384,8 @@ static void whisper(int fd, unsigned engine, unsigned flags)
gem_write(fd, batches[1023].handle, loc, &pass, sizeof(pass));
for (n = 1024; --n >= 1; ) {
+ uint32_t handle[2] = {};
int this_fd = fd;
- uint32_t handle[2];
execbuf.buffers_ptr = to_user_pointer(&batches[n-1]);
reloc_migrations += batches[n-1].offset != inter[n].presumed_offset;
@@ -403,7 +413,7 @@ static void whisper(int fd, unsigned engine, unsigned flags)
execbuf.flags &= ~ENGINE_MASK;
execbuf.flags |= engines[rand() % nengine];
}
- if (flags & CONTEXTS) {
+ if (flags & (CONTEXTS | QUEUES)) {
execbuf.rsvd1 = contexts[rand() % 64];
if (flags & PRIORITY)
ctx_set_random_priority(this_fd, execbuf.rsvd1);
@@ -486,7 +496,7 @@ static void whisper(int fd, unsigned engine, unsigned flags)
for (n = 0; n < 64; n++)
close(fds[n]);
}
- if (flags & CONTEXTS) {
+ if (flags & (CONTEXTS | QUEUES)) {
for (n = 0; n < 64; n++)
gem_context_destroy(fd, contexts[n]);
}
@@ -522,21 +532,27 @@ igt_main
{ "chain-forked", CHAIN | FORKED },
{ "chain-interruptible", CHAIN | INTERRUPTIBLE },
{ "chain-sync", CHAIN | SYNC },
- { "contexts", CONTEXTS },
- { "contexts-interruptible", CONTEXTS | INTERRUPTIBLE},
- { "contexts-forked", CONTEXTS | FORKED},
- { "contexts-priority", CONTEXTS | FORKED | PRIORITY },
- { "contexts-chain", CONTEXTS | CHAIN },
- { "contexts-sync", CONTEXTS | SYNC },
{ "fds", FDS },
{ "fds-interruptible", FDS | INTERRUPTIBLE},
{ "fds-forked", FDS | FORKED},
{ "fds-priority", FDS | FORKED | PRIORITY },
{ "fds-chain", FDS | CHAIN},
{ "fds-sync", FDS | SYNC},
+ { "contexts", CONTEXTS },
+ { "contexts-interruptible", CONTEXTS | INTERRUPTIBLE},
+ { "contexts-forked", CONTEXTS | FORKED},
+ { "contexts-priority", CONTEXTS | FORKED | PRIORITY },
+ { "contexts-chain", CONTEXTS | CHAIN },
+ { "contexts-sync", CONTEXTS | SYNC },
+ { "queues", QUEUES },
+ { "queues-interruptible", QUEUES | INTERRUPTIBLE},
+ { "queues-forked", QUEUES | FORKED},
+ { "queues-priority", QUEUES | FORKED | PRIORITY },
+ { "queues-chain", QUEUES | CHAIN },
+ { "queues-sync", QUEUES | SYNC },
{ NULL }
};
- int fd;
+ int fd = -1;
igt_fixture {
fd = drm_open_driver_master(DRIVER_INTEL);
@@ -547,9 +563,12 @@ igt_main
igt_fork_hang_detector(fd);
}
- for (const struct mode *m = modes; m->name; m++)
+ for (const struct mode *m = modes; m->name; m++) {
igt_subtest_f("%s", m->name)
whisper(fd, ALL_ENGINES, m->flags);
+ igt_subtest_f("%s-all", m->name)
+ whisper(fd, ALL_ENGINES, m->flags | ALL);
+ }
for (const struct intel_execution_engine *e = intel_execution_engines;
e->name; e++) {
diff --git a/tests/i915/gem_gtt_speed.c b/tests/i915/gem_gtt_speed.c
index 3d726c4e..dfa7216c 100644
--- a/tests/i915/gem_gtt_speed.c
+++ b/tests/i915/gem_gtt_speed.c
@@ -86,26 +86,35 @@ static void streaming_load(void *src, int len)
}
#endif
-int main(int argc, char **argv)
+int size = OBJECT_SIZE;
+
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 's':
+ size = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -s\tObject size in bytes\n";
+
+igt_simple_main_args("s:", NULL, help_str, opt_handler, NULL)
{
struct timeval start, end;
uint8_t *buf;
uint32_t handle;
unsigned cpu = x86_64_features();
- int size = OBJECT_SIZE;
int loop, i, tiling;
int fd;
- igt_simple_init(argc, argv);
-
igt_skip_on_simulation();
- if (argc > 1)
- size = atoi(argv[1]);
- if (size == 0) {
- igt_warn("Invalid object size specified\n");
- return 1;
- }
+ igt_assert_f(size != 0, "Invalid object size specified\n");
if (cpu) {
char str[1024];
@@ -505,6 +514,4 @@ int main(int argc, char **argv)
gem_close(fd, handle);
close(fd);
-
- igt_exit();
}
diff --git a/tests/i915/gem_hang.c b/tests/i915/gem_hang.c
index f506fc70..2c61cac0 100644
--- a/tests/i915/gem_hang.c
+++ b/tests/i915/gem_hang.c
@@ -64,17 +64,24 @@ gpu_hang(void)
intel_batchbuffer_flush(batch);
}
-int main(int argc, char **argv)
+static int opt_handler(int opt, int opt_index, void *data)
{
- int fd;
-
- igt_simple_init(argc, argv);
+ switch (opt) {
+ case 'p':
+ bad_pipe = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
- igt_assert_f(argc == 2,
- "usage: %s <disabled pipe number>\n",
- argv[0]);
+const char *help_str = " -p\tDisabled pipe number\n";
- bad_pipe = atoi(argv[1]);
+igt_simple_main_args("p:", NULL, help_str, opt_handler, NULL)
+{
+ int fd;
fd = drm_open_driver(DRIVER_INTEL);
@@ -88,6 +95,4 @@ int main(int argc, char **argv)
drm_intel_bufmgr_destroy(bufmgr);
close(fd);
-
- igt_exit();
}
diff --git a/tests/i915/gem_linear_blits.c b/tests/i915/gem_linear_blits.c
index 6afa4e9c..a5359288 100644
--- a/tests/i915/gem_linear_blits.c
+++ b/tests/i915/gem_linear_blits.c
@@ -220,12 +220,10 @@ static void run_test(int fd, int count)
#define MAX_32b ((1ull << 32) - 4096)
-int main(int argc, char **argv)
+igt_main
{
int fd = 0;
- igt_subtest_init(argc, argv);
-
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
@@ -261,6 +259,4 @@ int main(int argc, char **argv)
run_test(fd, count);
igt_stop_signal_helper();
}
-
- igt_exit();
}
diff --git a/tests/i915/gem_mmap.c b/tests/i915/gem_mmap.c
index d1b10013..f8cf052e 100644
--- a/tests/i915/gem_mmap.c
+++ b/tests/i915/gem_mmap.c
@@ -158,21 +158,22 @@ igt_main
igt_subtest("bad-object") {
uint32_t real_handle = gem_create(fd, 4096);
uint32_t handles[20];
- int i = 0;
+ size_t i = 0, len;
handles[i++] = 0xdeadbeef;
for(int bit = 0; bit < 16; bit++)
handles[i++] = real_handle | (1 << (bit + 16));
- handles[i] = real_handle + 1;
+ handles[i++] = real_handle + 1;
+ len = i;
- for (; i < 0; i--) {
+ for (i = 0; i < len; ++i) {
struct drm_i915_gem_mmap arg = {
.handle = handles[i],
.size = 4096,
};
igt_debug("Trying MMAP IOCTL with handle %x\n", handles[i]);
- igt_assert_eq(mmap_ioctl(fd, &arg), -EINVAL);
+ igt_assert_eq(mmap_ioctl(fd, &arg), -ENOENT);
}
gem_close(fd, real_handle);
diff --git a/tests/i915/gem_mmap_gtt.c b/tests/i915/gem_mmap_gtt.c
index 9a670f03..6f3a9c36 100644
--- a/tests/i915/gem_mmap_gtt.c
+++ b/tests/i915/gem_mmap_gtt.c
@@ -656,6 +656,9 @@ test_huge_copy(int fd, int huge, int tiling_a, int tiling_b, int ncpus)
uint64_t huge_object_size, i;
unsigned mode = CHECK_RAM;
+ igt_fail_on_f(intel_gen(devid) >= 11 && ncpus > 1,
+ "Please adjust your expectations, https://bugs.freedesktop.org/show_bug.cgi?id=110882\n");
+
switch (huge) {
case -2:
huge_object_size = gem_mappable_aperture_size() / 4;
@@ -886,14 +889,15 @@ igt_main
igt_subtest("bad-object") {
uint32_t real_handle = gem_create(fd, 4096);
uint32_t handles[20];
- int i = 0;
+ size_t i = 0, len;
handles[i++] = 0xdeadbeef;
for(int bit = 0; bit < 16; bit++)
handles[i++] = real_handle | (1 << (bit + 16));
- handles[i] = real_handle + 1;
+ handles[i++] = real_handle + 1;
+ len = i;
- for (; i < 0; i--) {
+ for (i = 0; i < len; ++i) {
struct drm_i915_gem_mmap_gtt arg = {
.handle = handles[i],
};
diff --git a/tests/i915/gem_mmap_wc.c b/tests/i915/gem_mmap_wc.c
index 159eedbf..c9e5bf23 100644
--- a/tests/i915/gem_mmap_wc.c
+++ b/tests/i915/gem_mmap_wc.c
@@ -496,20 +496,21 @@ igt_main
igt_subtest("bad-object") {
uint32_t real_handle = gem_create(fd, 4096);
uint32_t handles[20];
- int i = 0;
+ size_t i = 0, len;
handles[i++] = 0xdeadbeef;
for(int bit = 0; bit < 16; bit++)
handles[i++] = real_handle | (1 << (bit + 16));
- handles[i] = real_handle + 1;
+ handles[i++] = real_handle + 1;
+ len = i;
- for (; i < 0; i--) {
+ for (i = 0; i < len; ++i) {
struct drm_i915_gem_mmap arg = {
.handle = handles[i],
.size = 4096,
.flags = I915_MMAP_WC,
};
- igt_assert_eq(mmap_ioctl(fd, &arg), -EINVAL);
+ igt_assert_eq(mmap_ioctl(fd, &arg), -ENOENT);
}
gem_close(fd, real_handle);
diff --git a/tests/i915/gem_ppgtt.c b/tests/i915/gem_ppgtt.c
index ae9869c2..0d40a7b7 100644
--- a/tests/i915/gem_ppgtt.c
+++ b/tests/i915/gem_ppgtt.c
@@ -289,51 +289,9 @@ static void flink_and_close(void)
close(fd2);
}
-static void flink_and_exit(void)
-{
- uint32_t fd, fd2, fd3;
- uint32_t bo, flinked_bo, name;
- char match[20];
-
- fd = drm_open_driver(DRIVER_INTEL);
- igt_require(gem_uses_full_ppgtt(fd));
-
- bo = gem_create(fd, 4096);
- name = gem_flink(fd, bo);
- snprintf(match, sizeof(match), "(name: %u)", name);
-
- fd2 = drm_open_driver(DRIVER_INTEL);
- flinked_bo = gem_open(fd2, name);
-
- /* Verify VMA is not there yet. */
- igt_assert(!igt_debugfs_search(fd, "i915_gem_gtt", match));
-
- exec_and_get_offset(fd2, flinked_bo);
-
- /* Verify VMA has been created. */
- igt_assert(igt_debugfs_search(fd, "i915_gem_gtt", match));
-
- /* Close the context. */
- close(fd2);
-
- /* Execute a different and unrelated (wrt object sharing) context to
- * ensure engine drops its last context reference.
- */
- fd3 = drm_open_driver(DRIVER_INTEL);
- exec_and_get_offset(fd3, gem_create(fd3, 4096));
- close(fd3);
-
- igt_drop_caches_set(fd, DROP_ACTIVE | DROP_RETIRE | DROP_IDLE);
- igt_assert(!igt_debugfs_search(fd, "i915_gem_gtt", match));
-
- close(fd);
-}
-
#define N_CHILD 8
-int main(int argc, char **argv)
+igt_main
{
- igt_subtest_init(argc, argv);
-
igt_fixture {
int fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
@@ -366,9 +324,4 @@ int main(int argc, char **argv)
igt_subtest("flink-and-close-vma-leak")
flink_and_close();
-
- igt_subtest("flink-and-exit-vma-leak")
- flink_and_exit();
-
- igt_exit();
}
diff --git a/tests/i915/gem_pread.c b/tests/i915/gem_pread.c
index 83d878ee..c6478765 100644
--- a/tests/i915/gem_pread.c
+++ b/tests/i915/gem_pread.c
@@ -114,10 +114,25 @@ uint32_t *src, dst;
uint32_t *dst_user, src_stolen, large_stolen;
uint32_t *stolen_pf_user, *stolen_nopf_user;
int fd, count;
+int object_size = 0;
-int main(int argc, char **argv)
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 's':
+ object_size = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -s\tObject size in bytes\n";
+
+igt_main_args("s:", NULL, help_str, opt_handler, NULL)
{
- int object_size = 0;
double usecs;
char buf[100];
const char* bps;
@@ -131,10 +146,6 @@ int main(int argc, char **argv)
{ -1 },
}, *c;
- igt_subtest_init(argc, argv);
-
- if (argc > 1 && atoi(argv[1]))
- object_size = atoi(argv[1]);
if (object_size == 0)
object_size = OBJECT_SIZE;
object_size = (object_size + 3) & -4;
@@ -278,6 +289,4 @@ int main(int argc, char **argv)
close(fd);
}
-
- igt_exit();
}
diff --git a/tests/i915/gem_pwrite.c b/tests/i915/gem_pwrite.c
index 3fd0ef66..97703a2a 100644
--- a/tests/i915/gem_pwrite.c
+++ b/tests/i915/gem_pwrite.c
@@ -240,10 +240,25 @@ static void test_big_gtt(int fd, int scale, unsigned flags)
uint32_t *src, dst;
uint32_t *src_user, dst_stolen;
int fd;
+int object_size = 0;
-int main(int argc, char **argv)
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 's':
+ object_size = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -s\tObject size in bytes\n";
+
+igt_main_args("s:", NULL, help_str, opt_handler, NULL)
{
- int object_size = 0;
double usecs;
const char* bps;
char buf[100];
@@ -258,10 +273,6 @@ int main(int argc, char **argv)
{ -1 },
}, *c;
- igt_subtest_init(argc, argv);
-
- if (argc > 1 && atoi(argv[1]))
- object_size = atoi(argv[1]);
if (object_size == 0)
object_size = OBJECT_SIZE;
object_size = (object_size + 3) & -4;
@@ -388,6 +399,4 @@ int main(int argc, char **argv)
igt_fixture
close(fd);
-
- igt_exit();
}
diff --git a/tests/i915/gem_pwrite_pread.c b/tests/i915/gem_pwrite_pread.c
index f91fc7c4..3a58eae6 100644
--- a/tests/i915/gem_pwrite_pread.c
+++ b/tests/i915/gem_pwrite_pread.c
@@ -252,18 +252,30 @@ static const char *bytes_per_sec(char *buf, double v)
uint32_t *tmp, src, dst;
int fd;
+int object_size = 0;
-int main(int argc, char **argv)
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 's':
+ object_size = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -s\tObject size in bytes\n";
+
+igt_main_args("s:", NULL, help_str, opt_handler, NULL)
{
- int object_size = 0;
uint32_t buf[20];
int count;
- igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- if (argc > 1)
- object_size = atoi(argv[1]);
if (object_size == 0)
object_size = OBJECT_SIZE;
object_size = (object_size + 3) & -4;
@@ -405,6 +417,4 @@ int main(int argc, char **argv)
close(fd);
}
-
- igt_exit();
}
diff --git a/tests/i915/gem_render_copy.c b/tests/i915/gem_render_copy.c
index b5d1f45f..b8149483 100644
--- a/tests/i915/gem_render_copy.c
+++ b/tests/i915/gem_render_copy.c
@@ -678,24 +678,29 @@ static void test(data_t *data, uint32_t tiling, uint64_t ccs_modifier)
static int opt_handler(int opt, int opt_index, void *data)
{
- if (opt == 'd') {
+ switch (opt) {
+ case 'd':
opt_dump_png = true;
- }
-
- if (opt == 'a') {
+ break;
+ case 'a':
check_all_pixels = true;
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
-int main(int argc, char **argv)
+const char *help_str =
+ " -d\tDump PNG\n"
+ " -a\tCheck all pixels\n"
+ ;
+
+igt_main_args("da", NULL, help_str, opt_handler, NULL)
{
data_t data = {0, };
- igt_subtest_init_parse_opts(&argc, argv, "da", NULL, NULL,
- opt_handler, NULL);
-
igt_fixture {
data.drm_fd = drm_open_driver_render(DRIVER_INTEL);
data.devid = intel_get_drm_devid(data.drm_fd);
@@ -743,6 +748,4 @@ int main(int argc, char **argv)
intel_batchbuffer_free(data.batch);
drm_intel_bufmgr_destroy(data.bufmgr);
}
-
- igt_exit();
}
diff --git a/tests/i915/gem_render_copy_redux.c b/tests/i915/gem_render_copy_redux.c
index 24b838ba..ef601c22 100644
--- a/tests/i915/gem_render_copy_redux.c
+++ b/tests/i915/gem_render_copy_redux.c
@@ -202,12 +202,10 @@ static void copy_flink(data_t *data)
data_fini(&local);
}
-int main(int argc, char **argv)
+igt_main
{
data_t data = {0, };
- igt_subtest_init(argc, argv);
-
igt_fixture {
data_init(&data);
igt_require_gem(data.fd);
@@ -240,6 +238,4 @@ int main(int argc, char **argv)
copy_flink(&data);
igt_stop_signal_helper();
}
-
- igt_exit();
}
diff --git a/tests/i915/gem_request_retire.c b/tests/i915/gem_request_retire.c
index ea1c7327..304882e2 100644
--- a/tests/i915/gem_request_retire.c
+++ b/tests/i915/gem_request_retire.c
@@ -220,10 +220,8 @@ test_retire_vma_not_inactive(int fd)
int fd;
-int main(int argc, char **argv)
+igt_main
{
- igt_subtest_init(argc, argv);
-
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
@@ -233,6 +231,4 @@ int main(int argc, char **argv)
igt_subtest("retire-vma-not-inactive")
test_retire_vma_not_inactive(fd);
-
- igt_exit();
}
diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c
index 336008b8..42650e04 100644
--- a/tests/i915/gem_softpin.c
+++ b/tests/i915/gem_softpin.c
@@ -360,7 +360,7 @@ static void test_evict_hang(int fd)
execbuf.buffer_count = 1;
hang = igt_hang_ctx(fd, 0, 0, 0);
- expected = hang.spin->obj[1].offset;
+ expected = hang.spin->obj[IGT_SPIN_BATCH].offset;
/* Replace the hung batch with ourselves, forcing an eviction */
object.offset = expected;
diff --git a/tests/i915/gem_spin_batch.c b/tests/i915/gem_spin_batch.c
index a92672b8..3b4f9073 100644
--- a/tests/i915/gem_spin_batch.c
+++ b/tests/i915/gem_spin_batch.c
@@ -79,7 +79,7 @@ static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
+ .buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
.rsvd1 = ctx1,
};
@@ -98,7 +98,7 @@ static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
igt_spin_end(spin);
- gem_sync(fd, spin->obj[1].handle);
+ gem_sync(fd, spin->handle);
igt_spin_free(fd, spin);
diff --git a/tests/i915/gem_stress.c b/tests/i915/gem_stress.c
index ef8316f2..57e2909c 100644
--- a/tests/i915/gem_stress.c
+++ b/tests/i915/gem_stress.c
@@ -112,13 +112,30 @@ struct option_struct {
int use_signal_helper;
};
-struct option_struct options;
-
#define MAX_BUFS 4096
#define SCRATCH_BUF_SIZE 1024*1024
#define BUSY_BUF_SIZE (256*4096)
#define TILE_BYTES(size) ((size)*(size)*sizeof(uint32_t))
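+/* Defaults formerly assigned at the top of main() */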
+struct option_struct options = {
+ .scratch_buf_size = BUSY_BUF_SIZE,
+ .no_hw = 0,
+ .use_signal_helper = 1,
+ .gpu_busy_load = 0,
+ .num_buffers = 0,
+ .trace_tile = -1,
+ .use_render = 1,
+ .use_blt = 1,
+ .forced_tiling = -1,
+ .use_cpu_maps = 0,
+ .total_rounds = 512,
+ .fail = 1,
+ .ducttape = 1,
+ .tile_size = 16,
+ .tiles_per_buf = BUSY_BUF_SIZE / TILE_BYTES(16),
+ .check_render_cpyfn = 0,
+};
+
static struct igt_buf buffers[2][MAX_BUFS];
/* tile i is at logical position tile_permutation[i] */
static unsigned *tile_permutation;
@@ -627,93 +644,95 @@ static int parse_options(int opt, int opt_index, void *data)
int tmp;
switch(opt) {
- case 'd':
- options.no_hw = 1;
- igt_info("no-hw debug mode\n");
- break;
- case 'S':
- options.use_signal_helper = 0;
- igt_info("disabling that pesky nuisance who keeps interrupting us\n");
- break;
- case 's':
- tmp = atoi(optarg);
- if (tmp < options.tile_size*8192)
- igt_info("scratch buffer size needs to be at least %i\n", options.tile_size * 8192);
- else if (tmp & (tmp - 1)) {
- igt_info("scratch buffer size needs to be a power-of-two\n");
- } else {
- igt_info("fixed scratch buffer size to %u\n", tmp);
- options.scratch_buf_size = tmp;
- sanitize_tiles_per_buf();
- }
- break;
- case 'g':
- tmp = atoi(optarg);
- if (tmp < 0 || tmp > 10)
- igt_info("gpu busy load needs to be bigger than 0 and smaller than 10\n");
- else {
- igt_info("gpu busy load factor set to %i\n", tmp);
- gpu_busy_load = options.gpu_busy_load = tmp;
- }
- break;
- case 'c':
- options.num_buffers = atoi(optarg);
- igt_info("buffer count set to %i\n", options.num_buffers);
- break;
- case 't':
- options.trace_tile = atoi(optarg);
- igt_info("tracing tile %i\n", options.trace_tile);
- break;
- case 'r':
- options.use_render = 0;
- igt_info("disabling render copy\n");
- break;
- case 'b':
- options.use_blt = 0;
- igt_info("disabling blt copy\n");
- break;
- case 'u':
- options.forced_tiling = I915_TILING_NONE;
- igt_info("disabling tiling\n");
- break;
- case 'x':
- if (options.use_cpu_maps) {
- igt_info("tiling not possible with cpu maps\n");
- } else {
- options.forced_tiling = I915_TILING_X;
- igt_info("using only X-tiling\n");
- }
- break;
- case 'm':
- options.use_cpu_maps = 1;
- options.forced_tiling = I915_TILING_NONE;
- igt_info("disabling tiling\n");
- break;
- case 'o':
- options.total_rounds = atoi(optarg);
- igt_info("total rounds %i\n", options.total_rounds);
- break;
- case 'f':
- options.fail = 0;
- igt_info("not failing when detecting errors\n");
- break;
- case 'p':
- options.tiles_per_buf = atoi(optarg);
- igt_info("tiles per buffer %i\n", options.tiles_per_buf);
- break;
- case DUCTAPE:
- options.ducttape = 0;
- igt_info("applying duct-tape\n");
- break;
- case TILESZ:
- options.tile_size = atoi(optarg);
+ case 'd':
+ options.no_hw = 1;
+ igt_info("no-hw debug mode\n");
+ break;
+ case 'S':
+ options.use_signal_helper = 0;
+ igt_info("disabling that pesky nuisance who keeps interrupting us\n");
+ break;
+ case 's':
+ tmp = atoi(optarg);
+ if (tmp < options.tile_size*8192)
+ igt_info("scratch buffer size needs to be at least %i\n", options.tile_size * 8192);
+ else if (tmp & (tmp - 1)) {
+ igt_info("scratch buffer size needs to be a power-of-two\n");
+ } else {
+ igt_info("fixed scratch buffer size to %u\n", tmp);
+ options.scratch_buf_size = tmp;
sanitize_tiles_per_buf();
- igt_info("til size %i\n", options.tile_size);
- break;
- case CHCK_RENDER:
- options.check_render_cpyfn = 1;
- igt_info("checking render copy function\n");
- break;
+ }
+ break;
+ case 'g':
+ tmp = atoi(optarg);
+ if (tmp < 0 || tmp > 10)
+ igt_info("gpu busy load needs to be bigger than 0 and smaller than 10\n");
+ else {
+ igt_info("gpu busy load factor set to %i\n", tmp);
+ gpu_busy_load = options.gpu_busy_load = tmp;
+ }
+ break;
+ case 'c':
+ options.num_buffers = atoi(optarg);
+ igt_info("buffer count set to %i\n", options.num_buffers);
+ break;
+ case 't':
+ options.trace_tile = atoi(optarg);
+ igt_info("tracing tile %i\n", options.trace_tile);
+ break;
+ case 'r':
+ options.use_render = 0;
+ igt_info("disabling render copy\n");
+ break;
+ case 'b':
+ options.use_blt = 0;
+ igt_info("disabling blt copy\n");
+ break;
+ case 'u':
+ options.forced_tiling = I915_TILING_NONE;
+ igt_info("disabling tiling\n");
+ break;
+ case 'x':
+ if (options.use_cpu_maps) {
+ igt_info("tiling not possible with cpu maps\n");
+ } else {
+ options.forced_tiling = I915_TILING_X;
+ igt_info("using only X-tiling\n");
+ }
+ break;
+ case 'm':
+ options.use_cpu_maps = 1;
+ options.forced_tiling = I915_TILING_NONE;
+ igt_info("disabling tiling\n");
+ break;
+ case 'o':
+ options.total_rounds = atoi(optarg);
+ igt_info("total rounds %i\n", options.total_rounds);
+ break;
+ case 'f':
+ options.fail = 0;
+ igt_info("not failing when detecting errors\n");
+ break;
+ case 'p':
+ options.tiles_per_buf = atoi(optarg);
+ igt_info("tiles per buffer %i\n", options.tiles_per_buf);
+ break;
+ case DUCTAPE:
+ options.ducttape = 0;
+ igt_info("applying duct-tape\n");
+ break;
+ case TILESZ:
+ options.tile_size = atoi(optarg);
+ sanitize_tiles_per_buf();
+ igt_info("til size %i\n", options.tile_size);
+ break;
+ case CHCK_RENDER:
+ options.check_render_cpyfn = 1;
+ igt_info("checking render copy function\n");
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
}
/* actually 32767, according to docs, but that kills our nice pot calculations. */
@@ -726,7 +745,7 @@ static int parse_options(int opt, int opt_index, void *data)
}
igt_info("Limiting buffer to %dx%d\n", options.max_dimension, options.max_dimension);
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
static void init(void)
@@ -809,51 +828,32 @@ static void check_render_copyfunc(void)
}
}
+static struct option long_options[] = {
+ {"no-hw", 0, 0, 'd'},
+ {"buf-size", 1, 0, 's'},
+ {"gpu-busy-load", 1, 0, 'g'},
+ {"no-signals", 0, 0, 'S'},
+ {"buffer-count", 1, 0, 'c'},
+ {"trace-tile", 1, 0, 't'},
+ {"disable-blt", 0, 0, 'b'},
+ {"disable-render", 0, 0, 'r'},
+ {"untiled", 0, 0, 'u'},
+ {"x-tiled", 0, 0, 'x'},
+ {"use-cpu-maps", 0, 0, 'm'},
+ {"rounds", 1, 0, 'o'},
+ {"no-fail", 0, 0, 'f'},
+ {"tiles-per-buf", 0, 0, 'p'},
+ {"remove-duct-tape", 0, 0, DUCTAPE},
+ {"tile-size", 1, 0, TILESZ},
+ {"check-render-cpyfn", 0, 0, CHCK_RENDER},
+ {NULL, 0, 0, 0},
+};
-int main(int argc, char **argv)
+igt_simple_main_args("ds:g:c:t:rbuxmo:fp:",
+ long_options, NULL, parse_options, NULL)
{
int i, j;
unsigned *current_permutation, *tmp_permutation;
- static struct option long_options[] = {
- {"no-hw", 0, 0, 'd'},
- {"buf-size", 1, 0, 's'},
- {"gpu-busy-load", 1, 0, 'g'},
- {"no-signals", 0, 0, 'S'},
- {"buffer-count", 1, 0, 'c'},
- {"trace-tile", 1, 0, 't'},
- {"disable-blt", 0, 0, 'b'},
- {"disable-render", 0, 0, 'r'},
- {"untiled", 0, 0, 'u'},
- {"x-tiled", 0, 0, 'x'},
- {"use-cpu-maps", 0, 0, 'm'},
- {"rounds", 1, 0, 'o'},
- {"no-fail", 0, 0, 'f'},
- {"tiles-per-buf", 0, 0, 'p'},
- {"remove-duct-tape", 0, 0, DUCTAPE},
- {"tile-size", 1, 0, TILESZ},
- {"check-render-cpyfn", 0, 0, CHCK_RENDER},
- {NULL, 0, 0, 0},
- };
-
- options.scratch_buf_size = 256*4096;
- options.no_hw = 0;
- options.use_signal_helper = 1;
- options.gpu_busy_load = 0;
- options.num_buffers = 0;
- options.trace_tile = -1;
- options.use_render = 1;
- options.use_blt = 1;
- options.forced_tiling = -1;
- options.use_cpu_maps = 0;
- options.total_rounds = 512;
- options.fail = 1;
- options.ducttape = 1;
- options.tile_size = 16;
- options.tiles_per_buf = options.scratch_buf_size / TILE_BYTES(options.tile_size);
- options.check_render_cpyfn = 0;
-
- igt_simple_init_parse_opts(&argc, argv,"ds:g:c:t:rbuxmo:fp:",
- long_options, NULL, parse_options, NULL);
drm_fd = drm_open_driver(DRIVER_INTEL);
devid = intel_get_drm_devid(drm_fd);
@@ -910,6 +910,4 @@ int main(int argc, char **argv)
close(drm_fd);
igt_stop_signal_helper();
-
- igt_exit();
}
diff --git a/tests/i915/gem_tiled_blits.c b/tests/i915/gem_tiled_blits.c
index 51c1b584..28861d0b 100644
--- a/tests/i915/gem_tiled_blits.c
+++ b/tests/i915/gem_tiled_blits.c
@@ -198,10 +198,8 @@ static void run_test(int count)
int fd;
-int main(int argc, char **argv)
+igt_main
{
- igt_subtest_init(argc, argv);
-
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
@@ -249,6 +247,4 @@ int main(int argc, char **argv)
close(fd);
}
-
- igt_exit();
}
diff --git a/tests/i915/gem_userptr_blits.c b/tests/i915/gem_userptr_blits.c
index 8f8ddf43..1373f160 100644
--- a/tests/i915/gem_userptr_blits.c
+++ b/tests/i915/gem_userptr_blits.c
@@ -1776,13 +1776,25 @@ uint64_t total_ram;
uint64_t aperture_size;
int fd, count;
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 'c':
+ count = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
-int main(int argc, char **argv)
+const char *help_str = " -c\tBuffer count\n";
+
+igt_main_args("c:", NULL, help_str, opt_handler, NULL)
{
int size = sizeof(linear);
- igt_subtest_init(argc, argv);
-
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_assert(fd >= 0);
@@ -1793,8 +1805,6 @@ int main(int argc, char **argv)
aperture_size = gem_aperture_size(fd);
igt_info("Aperture size is %lu MiB\n", (long)(aperture_size / (1024*1024)));
- if (argc > 1)
- count = atoi(argv[1]);
if (count == 0)
count = 2 * aperture_size / (1024*1024) / 3;
@@ -2044,6 +2054,4 @@ int main(int argc, char **argv)
igt_subtest("access-control")
test_access_control(fd);
-
- igt_exit();
}
diff --git a/tests/i915/gem_vm_create.c b/tests/i915/gem_vm_create.c
new file mode 100644
index 00000000..cbd273d9
--- /dev/null
+++ b/tests/i915/gem_vm_create.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "igt_dummyload.h"
+#include "i915/gem_vm.h"
+
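+/* Thin ioctl wrappers returning -errno so callers can assert on exact errors */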
+static int vm_create_ioctl(int i915, struct drm_i915_gem_vm_control *ctl)
+{
+ int err = 0;
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_CREATE, ctl)) {
+ err = -errno;
+ igt_assume(err);
+ }
+ errno = 0;
+ return err;
+}
+
+static int vm_destroy_ioctl(int i915, struct drm_i915_gem_vm_control *ctl)
+{
+ int err = 0;
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_DESTROY, ctl)) {
+ err = -errno;
+ igt_assume(err);
+ }
+ errno = 0;
+ return err;
+}
+
+static int ctx_create_ioctl(int i915,
+ struct drm_i915_gem_context_create_ext *arg)
+{
+ int err = 0;
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, arg)) {
+ err = -errno;
+ igt_assume(err);
+ }
+ errno = 0;
+ return err;
+}
+
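+/* Check whether the kernel supports the VM create/destroy ioctls (needs full-ppgtt) */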
+static bool has_vm(int i915)
+{
+ struct drm_i915_gem_vm_control ctl = {};
+ int err;
+
+ err = vm_create_ioctl(i915, &ctl);
+ switch (err) {
+ case -EINVAL: /* unknown ioctl */
+ case -ENODEV: /* !full-ppgtt */
+ return false;
+
+ case 0:
+ gem_vm_destroy(i915, ctl.vm_id);
+ return true;
+
+ default:
+ igt_fail_on_f(err, "Unknown response from VM_CREATE\n");
+ return false;
+ }
+}
+
+static void invalid_create(int i915)
+{
+ struct drm_i915_gem_vm_control ctl = {};
+ struct i915_user_extension ext = { .name = -1 };
+
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
+ gem_vm_destroy(i915, ctl.vm_id);
+
+ ctl.vm_id = 0xdeadbeef;
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
+ gem_vm_destroy(i915, ctl.vm_id);
+ ctl.vm_id = 0;
+
+ ctl.flags = -1;
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), -EINVAL);
+ ctl.flags = 0;
+
+ ctl.extensions = -1;
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), -EFAULT);
+ ctl.extensions = to_user_pointer(&ext);
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), -EINVAL);
+ ctl.extensions = 0;
+}
+
+static void invalid_destroy(int i915)
+{
+ struct drm_i915_gem_vm_control ctl = {};
+
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -ENOENT);
+
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -ENOENT);
+
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
+ ctl.vm_id = ctl.vm_id + 1; /* assumes no one else allocated */
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -ENOENT);
+ ctl.vm_id = ctl.vm_id - 1;
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);
+
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
+ ctl.flags = -1;
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -EINVAL);
+ ctl.flags = 0;
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);
+
+ igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
+ ctl.extensions = -1;
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -EINVAL);
+ ctl.extensions = 0;
+ igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);
+}
+
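+/* Create a batch buffer containing just MI_BATCH_BUFFER_END */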
+static uint32_t __batch_create(int i915, uint32_t offset)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ uint32_t handle;
+
+ handle = gem_create(i915, ALIGN(offset + 4, 4096));
+ gem_write(i915, handle, offset, &bbe, sizeof(bbe));
+
+ return handle;
+}
+
+static uint32_t batch_create(int i915)
+{
+ return __batch_create(i915, 0);
+}
+
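+/* Two contexts sharing a VM must report the same softpinned offset for a buffer */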
+static void check_same_vm(int i915, uint32_t ctx_a, uint32_t ctx_b)
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ };
+
+ /* First verify that we try to use "softpinning" by default */
+ batch.offset = 48 << 20;
+ eb.rsvd1 = ctx_a;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 48 << 20);
+
+ /* An already active VMA will try to keep its offset */
+ batch.offset = 0;
+ eb.rsvd1 = ctx_b;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 48 << 20);
+
+ gem_sync(i915, batch.handle);
+ gem_close(i915, batch.handle);
+}
+
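+/* Create two contexts bound to one VM via the SETPARAM create extension */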
+static void create_ext(int i915)
+{
+ struct drm_i915_gem_context_create_ext_setparam ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
+ { .param = I915_CONTEXT_PARAM_VM }
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS
+ };
+ uint32_t ctx[2];
+
+ igt_require(ctx_create_ioctl(i915, &create) == 0);
+ gem_context_destroy(i915, create.ctx_id);
+
+ create.extensions = to_user_pointer(&ext);
+
+ ext.param.value = gem_vm_create(i915);
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ ctx[0] = create.ctx_id;
+
+ igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
+ ctx[1] = create.ctx_id;
+
+ gem_vm_destroy(i915, ext.param.value);
+
+ check_same_vm(i915, ctx[0], ctx[1]);
+
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
+}
+
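+/* Check that execbuf honours softpinned offsets across VM replacement via I915_CONTEXT_PARAM_VM */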
+static void execbuf(int i915)
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ };
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+
+ /* First verify that we try to use "softpinning" by default */
+ batch.offset = 48 << 20;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 48 << 20);
+
+ arg.value = gem_vm_create(i915);
+ gem_context_set_param(i915, &arg);
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 48 << 20);
+ gem_vm_destroy(i915, arg.value);
+
+ arg.value = gem_vm_create(i915);
+ gem_context_set_param(i915, &arg);
+ batch.offset = 0;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, 0);
+ gem_vm_destroy(i915, arg.value);
+
+ gem_sync(i915, batch.handle);
+ gem_close(i915, batch.handle);
+}
+
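+/* Emit an MI_STORE_DWORD_IMM from a separate batch to write a value at addr */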
+static void
+write_to_address(int fd, uint32_t ctx, uint64_t addr, uint32_t value)
+{
+ const int gen = intel_gen(intel_get_drm_devid(fd));
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = gem_create(fd, 4096)
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .rsvd1 = ctx,
+ };
+ uint32_t cs[16];
+ int i;
+
+ i = 0;
+ cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ cs[++i] = addr;
+ cs[++i] = addr >> 32;
+ } else if (gen >= 4) {
+ cs[++i] = 0;
+ cs[++i] = addr;
+ } else {
+ cs[i]--;
+ cs[++i] = addr;
+ }
+ cs[++i] = value;
+ cs[++i] = MI_BATCH_BUFFER_END;
+ gem_write(fd, batch.handle, 0, cs, sizeof(cs));
+
+ gem_execbuf(fd, &eb);
+ igt_assert(batch.offset != addr);
+
+ gem_sync(fd, batch.handle);
+ gem_close(fd, batch.handle);
+}
+
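+/* A vm_id is local to its fd: the same id on another fd must map a different address space */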
+static void isolation(int i915)
+{
+ struct drm_i915_gem_exec_object2 obj[2] = {
+ {
+ .handle = gem_create(i915, 4096),
+ .offset = 1 << 20
+ },
+ { .handle = batch_create(i915), }
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = 2,
+ };
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ int other = gem_reopen_driver(i915);
+ uint32_t ctx[2], vm[2], result;
+ int loops = 4096;
+
+	/* A vm_id on one fd is not the same on another fd */
+ igt_assert_neq(i915, other);
+
+ ctx[0] = gem_context_create(i915);
+ ctx[1] = gem_context_create(other);
+
+ vm[0] = gem_vm_create(i915);
+ do {
+ vm[1] = gem_vm_create(other);
+ } while (vm[1] != vm[0] && loops-- > 0);
+ igt_assert(loops);
+
+ arg.ctx_id = ctx[0];
+ arg.value = vm[0];
+ gem_context_set_param(i915, &arg);
+
+ arg.ctx_id = ctx[1];
+ arg.value = vm[1];
+ gem_context_set_param(other, &arg);
+
+ eb.rsvd1 = ctx[0];
+ gem_execbuf(i915, &eb); /* bind object into vm[0] */
+
+ /* Verify the trick with the assumed target address works */
+ write_to_address(i915, ctx[0], obj[0].offset, 1);
+ gem_read(i915, obj[0].handle, 0, &result, sizeof(result));
+ igt_assert_eq(result, 1);
+
+ /* Now check that we can't write to vm[0] from second fd/vm */
+ write_to_address(other, ctx[1], obj[0].offset, 2);
+ gem_read(i915, obj[0].handle, 0, &result, sizeof(result));
+ igt_assert_eq(result, 1);
+
+ close(other);
+
+ gem_close(i915, obj[1].handle);
+ gem_close(i915, obj[0].handle);
+
+ gem_context_destroy(i915, ctx[0]);
+ gem_vm_destroy(i915, vm[0]);
+}
+
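+/* Replace and destroy the VM/context while spinning batches are still in flight */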
+static void async_destroy(int i915)
+{
+ struct drm_i915_gem_context_param arg = {
+ .ctx_id = gem_context_create(i915),
+ .value = gem_vm_create(i915),
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ igt_spin_t *spin[2];
+
+ spin[0] = igt_spin_new(i915,
+ .ctx = arg.ctx_id,
+ .flags = IGT_SPIN_POLL_RUN);
+ igt_spin_busywait_until_started(spin[0]);
+
+ gem_context_set_param(i915, &arg);
+ spin[1] = __igt_spin_new(i915, .ctx = arg.ctx_id);
+
+ igt_spin_end(spin[0]);
+ gem_sync(i915, spin[0]->handle);
+
+ gem_vm_destroy(i915, arg.value);
+ gem_context_destroy(i915, arg.ctx_id);
+
+ igt_spin_end(spin[1]);
+ gem_sync(i915, spin[1]->handle);
+
+ for (int i = 0; i < ARRAY_SIZE(spin); i++)
+ igt_spin_free(i915, spin[i]);
+}
+
+igt_main
+{
+ int i915 = -1;
+
+ igt_fixture {
+ i915 = drm_open_driver(DRIVER_INTEL);
+ igt_require_gem(i915);
+ igt_require(has_vm(i915));
+ }
+
+ igt_subtest("invalid-create")
+ invalid_create(i915);
+
+ igt_subtest("invalid-destroy")
+ invalid_destroy(i915);
+
+ igt_subtest_group {
+ igt_fixture {
+ gem_context_require_param(i915, I915_CONTEXT_PARAM_VM);
+ }
+
+ igt_subtest("execbuf")
+ execbuf(i915);
+
+ igt_subtest("isolation")
+ isolation(i915);
+
+ igt_subtest("create-ext")
+ create_ext(i915);
+
+ igt_subtest("async-destroy")
+ async_destroy(i915);
+ }
+
+ igt_fixture {
+ close(i915);
+ }
+}
diff --git a/tests/i915/gem_wait.c b/tests/i915/gem_wait.c
index ee2ecfa0..b5e32219 100644
--- a/tests/i915/gem_wait.c
+++ b/tests/i915/gem_wait.c
@@ -134,7 +134,7 @@ static void basic(int fd, unsigned engine, unsigned flags)
igt_main
{
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
int fd = -1;
igt_skip_on_simulation();
@@ -177,27 +177,27 @@ igt_main
basic(fd, ALL_ENGINES, WRITE);
}
- for (e = intel_execution_engines; e->name; e++) {
+ __for_each_physical_engine(fd, e) {
igt_subtest_group {
igt_subtest_f("busy-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, BUSY);
+ basic(fd, e->flags, BUSY);
}
igt_subtest_f("wait-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, 0);
+ basic(fd, e->flags, 0);
}
igt_subtest_f("await-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, AWAIT);
+ basic(fd, e->flags, AWAIT);
}
igt_subtest_f("write-busy-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, BUSY | WRITE);
+ basic(fd, e->flags, BUSY | WRITE);
}
igt_subtest_f("write-wait-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, WRITE);
+ basic(fd, e->flags, WRITE);
}
}
}
@@ -234,22 +234,22 @@ igt_main
basic(fd, ALL_ENGINES, WRITE | HANG);
}
- for (e = intel_execution_engines; e->name; e++) {
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("hang-busy-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, HANG | BUSY);
+ basic(fd, e->flags, HANG | BUSY);
}
igt_subtest_f("hang-wait-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, HANG);
+ basic(fd, e->flags, HANG);
}
igt_subtest_f("hang-busy-write-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, HANG | WRITE | BUSY);
+ basic(fd, e->flags, HANG | WRITE | BUSY);
}
igt_subtest_f("hang-wait-write-%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e->exec_id | e->flags, HANG | WRITE);
+ basic(fd, e->flags, HANG | WRITE);
}
}
diff --git a/tests/i915/gem_workarounds.c b/tests/i915/gem_workarounds.c
index 44e3dce8..403863c0 100644
--- a/tests/i915/gem_workarounds.c
+++ b/tests/i915/gem_workarounds.c
@@ -82,14 +82,15 @@ static bool write_only(const uint32_t addr)
#define MI_STORE_REGISTER_MEM (0x24 << 23)
-static int workaround_fail_count(int fd, uint32_t ctx)
+static int workaround_fail_count(int i915, uint32_t ctx)
{
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_execbuffer2 execbuf;
uint32_t result_sz, batch_sz;
uint32_t *base, *out;
- int fail_count = 0;
+ igt_spin_t *spin;
+ int fw, fail = 0;
reloc = calloc(num_wa_regs, sizeof(*reloc));
igt_assert(reloc);
@@ -101,13 +102,14 @@ static int workaround_fail_count(int fd, uint32_t ctx)
batch_sz = PAGE_ALIGN(batch_sz);
memset(obj, 0, sizeof(obj));
- obj[0].handle = gem_create(fd, result_sz);
- gem_set_caching(fd, obj[0].handle, I915_CACHING_CACHED);
- obj[1].handle = gem_create(fd, batch_sz);
+ obj[0].handle = gem_create(i915, result_sz);
+ gem_set_caching(i915, obj[0].handle, I915_CACHING_CACHED);
+ obj[1].handle = gem_create(i915, batch_sz);
obj[1].relocs_ptr = to_user_pointer(reloc);
obj[1].relocation_count = num_wa_regs;
- out = base = gem_mmap__cpu(fd, obj[1].handle, 0, batch_sz, PROT_WRITE);
+ out = base =
+ gem_mmap__cpu(i915, obj[1].handle, 0, batch_sz, PROT_WRITE);
for (int i = 0; i < num_wa_regs; i++) {
*out++ = MI_STORE_REGISTER_MEM | ((gen >= 8 ? 4 : 2) - 2);
*out++ = wa_regs[i].addr;
@@ -127,17 +129,21 @@ static int workaround_fail_count(int fd, uint32_t ctx)
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
execbuf.rsvd1 = ctx;
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
- gem_set_domain(fd, obj[0].handle, I915_GEM_DOMAIN_CPU, 0);
+ gem_set_domain(i915, obj[0].handle, I915_GEM_DOMAIN_CPU, 0);
+
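+ /*
+ * Keep the context active with a spinner and hold a forcewake
+ * reference so that the mmio fallback below reads live register
+ * values.
+ */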
+ spin = igt_spin_new(i915, .ctx = ctx, .flags = IGT_SPIN_POLL_RUN);
+ igt_spin_busywait_until_started(spin);
+
+ fw = igt_open_forcewake_handle(i915);
+ if (fw < 0)
+ igt_debug("Unable to obtain i915_user_forcewake!\n");
igt_debug("Address\tval\t\tmask\t\tread\t\tresult\n");
- out = gem_mmap__cpu(fd, obj[0].handle, 0, result_sz, PROT_READ);
+ out = gem_mmap__cpu(i915, obj[0].handle, 0, result_sz, PROT_READ);
for (int i = 0; i < num_wa_regs; i++) {
- const bool ok =
- (wa_regs[i].value & wa_regs[i].mask) ==
- (out[i] & wa_regs[i].mask);
char buf[80];
snprintf(buf, sizeof(buf),
@@ -145,33 +151,30 @@ static int workaround_fail_count(int fd, uint32_t ctx)
wa_regs[i].addr, wa_regs[i].value, wa_regs[i].mask,
out[i]);
- if (ok) {
+ /* If the SRM failed, fill in the result using mmio */
+ if (out[i] == 0)
+ out[i] = *(volatile uint32_t *)(igt_global_mmio + wa_regs[i].addr);
+
+ if ((wa_regs[i].value & wa_regs[i].mask) ==
+ (out[i] & wa_regs[i].mask)) {
igt_debug("%s\tOK\n", buf);
} else if (write_only(wa_regs[i].addr)) {
igt_debug("%s\tIGNORED (w/o)\n", buf);
} else {
igt_warn("%s\tFAIL\n", buf);
- fail_count++;
+ fail++;
}
}
munmap(out, result_sz);
- gem_close(fd, obj[1].handle);
- gem_close(fd, obj[0].handle);
- free(reloc);
-
- return fail_count;
-}
-
-static int reopen(int fd)
-{
- char path[256];
+ close(fw);
+ igt_spin_free(i915, spin);
- snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
- fd = open(path, O_RDWR);
- igt_assert_lte(0, fd);
+ gem_close(i915, obj[1].handle);
+ gem_close(i915, obj[0].handle);
+ free(reloc);
- return fd;
+ return fail;
}
#define CONTEXT 0x1
@@ -181,7 +184,7 @@ static void check_workarounds(int fd, enum operation op, unsigned int flags)
uint32_t ctx = 0;
if (flags & FD)
- fd = reopen(fd);
+ fd = gem_reopen_driver(fd);
if (flags & CONTEXT) {
gem_require_contexts(fd);
@@ -252,6 +255,8 @@ igt_main
device = drm_open_driver(DRIVER_INTEL);
igt_require_gem(device);
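+ /* Map the register BAR so the mmio fallback in workaround_fail_count() works. */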
+ intel_mmio_use_pci_bar(intel_get_pci_device());
+
gen = intel_gen(intel_get_drm_devid(device));
fd = igt_debugfs_open(device, "i915_wa_registers", O_RDONLY);
diff --git a/tests/i915/gen3_mixed_blits.c b/tests/i915/gen3_mixed_blits.c
index 447d2e5d..03a289df 100644
--- a/tests/i915/gen3_mixed_blits.c
+++ b/tests/i915/gen3_mixed_blits.c
@@ -442,21 +442,33 @@ check_bo(int fd, uint32_t handle, uint32_t val)
munmap(v, WIDTH*HEIGHT*4);
}
-int main(int argc, char **argv)
+int count;
+
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 'c':
+ count = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -c\tBuffer count\n";
+
+igt_simple_main_args("c:", NULL, help_str, opt_handler, NULL)
{
uint32_t *handle, *tiling, *start_val;
uint32_t start = 0;
- int i, fd, count;
-
- igt_simple_init(argc, argv);
+ int i, fd;
fd = drm_open_driver(DRIVER_INTEL);
igt_require(IS_GEN3(intel_get_drm_devid(fd)));
- count = 0;
- if (argc > 1)
- count = atoi(argv[1]);
if (count == 0)
count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
igt_info("Using %d 1MiB buffers\n", count);
@@ -518,6 +530,4 @@ int main(int argc, char **argv)
for (i = 0; i < count; i++)
check_bo(fd, handle[i], start_val[i]);
igt_info("done\n");
-
- igt_exit();
}
diff --git a/tests/i915/gen3_render_linear_blits.c b/tests/i915/gen3_render_linear_blits.c
index 9d1499a5..d3fc8055 100644
--- a/tests/i915/gen3_render_linear_blits.c
+++ b/tests/i915/gen3_render_linear_blits.c
@@ -315,21 +315,33 @@ check_bo(int fd, uint32_t handle, uint32_t val)
}
}
-int main(int argc, char **argv)
+int count;
+
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 'c':
+ count = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -c\tBuffer count\n";
+
+igt_simple_main_args("c:", NULL, help_str, opt_handler, NULL)
{
uint32_t *handle, *start_val;
uint32_t start = 0;
- int i, fd, count;
-
- igt_simple_init(argc, argv);
+ int i, fd;
fd = drm_open_driver(DRIVER_INTEL);
igt_require(IS_GEN3(intel_get_drm_devid(fd)));
- count = 0;
- if (argc > 1)
- count = atoi(argv[1]);
if (count == 0)
count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
igt_info("Using %d 1MiB buffers\n", count);
@@ -383,6 +395,4 @@ int main(int argc, char **argv)
}
for (i = 0; i < count; i++)
check_bo(fd, handle[i], start_val[i]);
-
- igt_exit();
}
diff --git a/tests/i915/gen3_render_mixed_blits.c b/tests/i915/gen3_render_mixed_blits.c
index 6dd7392b..15895c21 100644
--- a/tests/i915/gen3_render_mixed_blits.c
+++ b/tests/i915/gen3_render_mixed_blits.c
@@ -336,21 +336,33 @@ check_bo(int fd, uint32_t handle, uint32_t val)
munmap(v, WIDTH*HEIGHT*4);
}
-int main(int argc, char **argv)
+int count;
+
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 'c':
+ count = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -c\tBuffer count\n";
+
+igt_simple_main_args("c:", NULL, help_str, opt_handler, NULL)
{
uint32_t *handle, *tiling, *start_val;
uint32_t start = 0;
- int i, fd, count;
-
- igt_simple_init(argc, argv);
+ int i, fd;
fd = drm_open_driver(DRIVER_INTEL);
igt_require(IS_GEN3(intel_get_drm_devid(fd)));
- count = 0;
- if (argc > 1)
- count = atoi(argv[1]);
if (count == 0)
count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
igt_info("Using %d 1MiB buffers\n", count);
@@ -412,6 +424,4 @@ int main(int argc, char **argv)
for (i = 0; i < count; i++)
check_bo(fd, handle[i], start_val[i]);
igt_info("done\n");
-
- igt_exit();
}
diff --git a/tests/i915/gen3_render_tiledx_blits.c b/tests/i915/gen3_render_tiledx_blits.c
index 7efef589..f1a17714 100644
--- a/tests/i915/gen3_render_tiledx_blits.c
+++ b/tests/i915/gen3_render_tiledx_blits.c
@@ -323,21 +323,33 @@ check_bo(int fd, uint32_t handle, uint32_t val)
munmap(v, WIDTH*HEIGHT*4);
}
-int main(int argc, char **argv)
+int count;
+
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 'c':
+ count = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -c\tBuffer count\n";
+
+igt_simple_main_args("c:", NULL, help_str, opt_handler, NULL)
{
uint32_t *handle, *start_val;
uint32_t start = 0;
- int i, fd, count;
-
- igt_simple_init(argc, argv);
+ int i, fd;
fd = drm_open_driver(DRIVER_INTEL);
igt_require(IS_GEN3(intel_get_drm_devid(fd)));
- count = 0;
- if (argc > 1)
- count = atoi(argv[1]);
if (count == 0)
count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
igt_info("Using %d 1MiB buffers\n", count);
@@ -391,6 +403,4 @@ int main(int argc, char **argv)
}
for (i = 0; i < count; i++)
check_bo(fd, handle[i], start_val[i]);
-
- igt_exit();
}
diff --git a/tests/i915/gen3_render_tiledy_blits.c b/tests/i915/gen3_render_tiledy_blits.c
index 6d1d8bca..94127b62 100644
--- a/tests/i915/gen3_render_tiledy_blits.c
+++ b/tests/i915/gen3_render_tiledy_blits.c
@@ -323,21 +323,33 @@ check_bo(int fd, uint32_t handle, uint32_t val)
munmap(v, WIDTH*HEIGHT*4);
}
-int main(int argc, char **argv)
+int count;
+
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ switch (opt) {
+ case 'c':
+ count = atoi(optarg);
+ break;
+ default:
+ return IGT_OPT_HANDLER_ERROR;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+const char *help_str = " -c\tBuffer count\n";
+
+igt_simple_main_args("c:", NULL, help_str, opt_handler, NULL)
{
uint32_t *handle, *start_val;
uint32_t start = 0;
- int i, fd, count;
-
- igt_simple_init(argc, argv);
+ int i, fd;
fd = drm_open_driver(DRIVER_INTEL);
igt_require(IS_GEN3(intel_get_drm_devid(fd)));
- count = 0;
- if (argc > 1)
- count = atoi(argv[1]);
if (count == 0)
count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
igt_info("Using %d 1MiB buffers\n", count);
@@ -398,6 +410,4 @@ int main(int argc, char **argv)
for (i = 0; i < count; i++)
check_bo(fd, handle[i], start_val[i]);
igt_info("done\n");
-
- igt_exit();
}
diff --git a/tests/i915/i915_hangman.c b/tests/i915/i915_hangman.c
index 9a1d5889..58141fc9 100644
--- a/tests/i915/i915_hangman.c
+++ b/tests/i915/i915_hangman.c
@@ -209,7 +209,7 @@ static void test_error_state_capture(unsigned ring_id,
clear_error_state();
hang = igt_hang_ctx(device, 0, ring_id, HANG_ALLOW_CAPTURE);
- offset = hang.spin->obj[1].offset;
+ offset = hang.spin->obj[IGT_SPIN_BATCH].offset;
batch = gem_mmap__cpu(device, hang.spin->handle, 0, 4096, PROT_READ);
gem_set_domain(device, hang.spin->handle, I915_GEM_DOMAIN_CPU, 0);
@@ -256,7 +256,7 @@ static void hangcheck_unterminated(void)
igt_main
{
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
igt_hang_t hang = {};
igt_skip_on_simulation();
@@ -276,16 +276,9 @@ igt_main
igt_subtest("error-state-basic")
test_error_state_basic();
- for (e = intel_execution_engines; e->name; e++) {
- if (e->exec_id == 0)
- continue;
-
- igt_subtest_f("error-state-capture-%s", e->name) {
- igt_require(gem_ring_has_physical_engine(device, e->exec_id | e->flags));
- test_error_state_capture(e->exec_id | e->flags,
- e->full_name);
- }
- }
+ __for_each_physical_engine(device, e)
+ igt_subtest_f("error-state-capture-%s", e->name)
+ test_error_state_capture(e->flags, e->name);
igt_subtest("hangcheck-unterminated")
hangcheck_unterminated();
diff --git a/tests/i915/i915_pm_rpm.c b/tests/i915/i915_pm_rpm.c
index a2c9d0ed..e2c7ba21 100644
--- a/tests/i915/i915_pm_rpm.c
+++ b/tests/i915/i915_pm_rpm.c
@@ -394,8 +394,9 @@ static void init_mode_set_data(struct mode_set_data *data)
if (data->res) {
igt_assert(data->res->count_connectors <= MAX_CONNECTORS);
for (int i = 0; i < data->res->count_connectors; i++) {
- data->connectors[i] = drmModeGetConnectorCurrent(drm_fd,
- data->res->connectors[i]);
+ data->connectors[i] =
+ drmModeGetConnector(drm_fd,
+ data->res->connectors[i]);
data->edids[i] = get_connector_edid(data->connectors[i], i);
}
@@ -571,33 +572,53 @@ static void assert_drm_infos_equal(struct compare_data *d1,
assert_drm_crtcs_equal(d1->crtcs[i], d2->crtcs[i]);
}
-/* We could check the checksum too, but just the header is probably enough. */
-static bool edid_is_valid(const unsigned char *edid)
+static bool find_i2c_path(const char *connector_name, char *i2c_path)
{
- char edid_header[] = {
- 0x0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0,
- };
+ struct dirent *dirent;
+ DIR *dir;
+ int sysfs_card_fd = igt_sysfs_open(drm_fd);
+ int connector_fd = -1;
+ bool found_i2c_file = false;
- return (memcmp(edid, edid_header, sizeof(edid_header)) == 0);
-}
+ dir = fdopendir(sysfs_card_fd);
+ igt_assert(dir);
-static int count_drm_valid_edids(struct mode_set_data *data)
-{
- int ret = 0;
+ while ((dirent = readdir(dir))) {
+ /* Skip "cardx-" prefix */
+ char *dirname = strchr(dirent->d_name, '-');
+ if (dirname == NULL)
+ continue;
+ ++dirname;
- if (!data->res)
- return 0;
+ if (strcmp(dirname, connector_name) == 0) {
+ connector_fd = openat(sysfs_card_fd, dirent->d_name, O_RDONLY);
+ break;
+ }
+ }
+ closedir(dir);
- for (int i = 0; i < data->res->count_connectors; i++)
- if (data->edids[i] && edid_is_valid(data->edids[i]->data))
- ret++;
- return ret;
+ if (connector_fd < 0)
+ return false;
+
+ dir = fdopendir(connector_fd);
+ igt_assert(dir);
+
+ while ((dirent = readdir(dir))) {
+ if (strncmp(dirent->d_name, "i2c-", 4) == 0) {
+ sprintf(i2c_path, "/dev/%s", dirent->d_name);
+ found_i2c_file = true;
+ }
+ }
+ closedir(dir);
+ return found_i2c_file;
}
-static bool i2c_edid_is_valid(int fd)
+
+static bool i2c_read_edid(const char *connector_name, unsigned char *edid)
{
- int rc;
- unsigned char edid[128] = {};
+ char i2c_path[PATH_MAX];
+ bool result;
+ int rc, fd;
struct i2c_msg msgs[] = {
{ /* Start at 0. */
.addr = 0x50,
@@ -616,69 +637,92 @@ static bool i2c_edid_is_valid(int fd)
.nmsgs = 2,
};
+ result = find_i2c_path(connector_name, i2c_path);
+ if (!result)
+ return false;
+
+ fd = open(i2c_path, O_RDWR);
+ igt_assert_neq(fd, -1);
+
rc = ioctl(fd, I2C_RDWR, &msgset);
- return (rc >= 0) ? edid_is_valid(edid) : false;
+ if (rc == -1) {
+ igt_debug("I2C access failed with errno %d, %s\n",
+ errno, strerror(errno));
+ errno = 0;
+ }
+
+ close(fd);
+ return rc >= 0;
}
-static int count_i2c_valid_edids(void)
+static void format_hex_string(const unsigned char edid[static EDID_LENGTH],
+ char buf[static EDID_LENGTH * 5 + 1])
{
- int fd, ret = 0;
- DIR *dir;
+ for (int i = 0; i < EDID_LENGTH; ++i)
+ sprintf(buf+i*5, "0x%02x ", edid[i]);
+}
- struct dirent *dirent;
- char full_name[PATH_MAX];
+static void test_i2c(struct mode_set_data *data)
+{
+ bool edid_mismatch_i2c_vs_drm = false;
+ igt_display_t display;
+ igt_display_require(&display, drm_fd);
- dir = opendir("/dev/");
- igt_assert(dir);
+ for (int i = 0; i < data->res->count_connectors; i++) {
+ unsigned char *drm_edid = data->edids[i] ? data->edids[i]->data : NULL;
+ unsigned char i2c_edid[EDID_LENGTH] = {};
- while ((dirent = readdir(dir))) {
- if (strncmp(dirent->d_name, "i2c-", 4) == 0) {
- sprintf(full_name, "/dev/%s", dirent->d_name);
- fd = open(full_name, O_RDWR);
- igt_assert_neq(fd, -1);
- if (i2c_edid_is_valid(fd))
- ret++;
- close(fd);
- }
- }
+ igt_output_t *output = igt_output_from_connector(&display,
+ data->connectors[i]);
+ char *connector_name = (char *) igt_output_name(output);
- closedir(dir);
+ bool got_i2c_edid = i2c_read_edid(connector_name, i2c_edid);
+ bool got_drm_edid = drm_edid != NULL;
+ bool is_vga = data->connectors[i]->connector_type == DRM_MODE_CONNECTOR_VGA;
- return ret;
-}
+ bool edids_equal;
-static int count_vga_outputs(struct mode_set_data *data)
-{
- int count = 0;
+ /* We fail to detect some VGA monitors using our i2c method. If you look
+ * at the dmesg of these cases, you'll see the Kernel complaining about
+ * the EDID reading mostly FFs and then disabling bit-banging. Since we
+ * don't want to reimplement everything the Kernel does, let's just
+ * accept the fact that some VGA outputs won't be properly detected. */
+ if (is_vga)
+ continue;
- if (!data->res)
- return 0;
+ if (!got_i2c_edid && !got_drm_edid)
+ continue;
- for (int i = 0; i < data->res->count_connectors; i++)
- if (data->connectors[i]->connector_type ==
- DRM_MODE_CONNECTOR_VGA)
- count++;
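+ /* Only the base 128-byte EDID block is captured over i2c, so only
+ * that much is compared against the DRM blob. */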
+ if (got_i2c_edid && got_drm_edid)
+ edids_equal = (0 == memcmp(drm_edid, i2c_edid, EDID_LENGTH));
+ else
+ edids_equal = false;
- return count;
-}
-static void test_i2c(struct mode_set_data *data)
-{
- int i2c_edids = count_i2c_valid_edids();
- int drm_edids = count_drm_valid_edids(data);
- int vga_outputs = count_vga_outputs(data);
- int diff;
+ if (!edids_equal) {
+ char buf[5 * EDID_LENGTH + 1];
+ igt_critical("Detected EDID mismatch on connector %s\n",
+ connector_name);
- igt_debug("i2c edids:%d drm edids:%d vga outputs:%d\n",
- i2c_edids, drm_edids, vga_outputs);
+ if (got_i2c_edid)
+ format_hex_string(i2c_edid, buf);
+ else
+ sprintf(buf, "NULL");
- /* We fail to detect some VGA monitors using our i2c method. If you look
- * at the dmesg of these cases, you'll see the Kernel complaining about
- * the EDID reading mostly FFs and then disabling bit-banging. Since we
- * don't want to reimplement everything the Kernel does, let's just
- * accept the fact that some VGA outputs won't be properly detected. */
- diff = drm_edids - i2c_edids;
- igt_assert(diff <= vga_outputs && diff >= 0);
+ igt_critical("i2c: %s\n", buf);
+
+ if (got_drm_edid)
+ format_hex_string(drm_edid, buf);
+ else
+ sprintf(buf, "NULL");
+
+ igt_critical("drm: %s\n", buf);
+
+ edid_mismatch_i2c_vs_drm = true;
+ }
+ }
+ igt_fail_on_f(edid_mismatch_i2c_vs_drm,
+ "There is an EDID mismatch between i2c and DRM!\n");
}
static void setup_pc8(void)
@@ -1950,26 +1994,23 @@ static int opt_handler(int opt, int opt_index, void *data)
stay = true;
break;
default:
- igt_assert(0);
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
-int main(int argc, char *argv[])
-{
- const char *help_str =
- " --stress\t\tMake the stress-tests more stressful.\n"
- " --stay\t\tDisable all screen and try to go into runtime pm. Useful for debugging.";
- static struct option long_options[] = {
- {"stress", 0, 0, 'l'},
- {"stay", 0, 0, 's'},
- { 0, 0, 0, 0 }
- };
-
- igt_subtest_init_parse_opts(&argc, argv, "", long_options,
- help_str, opt_handler, NULL);
+const char *help_str =
+ " --stress\t\tMake the stress-tests more stressful.\n"
+ " --stay\t\tDisable all screen and try to go into runtime pm. Useful for debugging.";
+static struct option long_options[] = {
+ {"stress", 0, 0, 'l'},
+ {"stay", 0, 0, 's'},
+ { 0, 0, 0, 0 }
+};
+igt_main_args("", long_options, help_str, opt_handler, NULL)
+{
igt_subtest("basic-rte") {
igt_assert(setup_environment());
basic_subtest();
@@ -2120,6 +2161,4 @@ int main(int argc, char *argv[])
/* Remove our mmio_debugging module */
igt_i915_driver_unload();
}
-
- igt_exit();
}
diff --git a/tests/i915/i915_query.c b/tests/i915/i915_query.c
index 7d0c0e3a..ecbec3ae 100644
--- a/tests/i915/i915_query.c
+++ b/tests/i915/i915_query.c
@@ -483,6 +483,241 @@ test_query_topology_known_pci_ids(int fd, int devid)
free(topo_info);
}
+static bool query_engine_info_supported(int fd)
+{
+ struct drm_i915_query_item item = {
+ .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ };
+
+ return __i915_query_items(fd, &item, 1) == 0 && item.length > 0;
+}
+
+static void engines_invalid(int fd)
+{
+ struct drm_i915_query_engine_info *engines;
+ struct drm_i915_query_item item;
+ unsigned int len;
+
+ /* Flags is MBZ. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.flags = 1;
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /* Length is non-zero but smaller than the required size. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = 1;
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /* Query correct length. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length >= 0);
+ len = item.length;
+
+ engines = malloc(len);
+ igt_assert(engines);
+
+ /* Invalid (NULL) data pointer. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EFAULT);
+
+ /* All fields in engines query are MBZ and only filled by the kernel. */
+
+ memset(engines, 0, len);
+ engines->num_engines = 1;
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ memset(engines, 0, len);
+ engines->rsvd[0] = 1;
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ memset(engines, 0, len);
+ engines->rsvd[1] = 1;
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ memset(engines, 0, len);
+ engines->rsvd[2] = 1;
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ free(engines);
+
+ igt_assert(len <= 4096);
+ engines = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
+ igt_assert(engines != MAP_FAILED);
+
+ /* PROT_NONE is similar to unmapped area. */
+ memset(engines, 0, len);
+ igt_assert_eq(mprotect(engines, len, PROT_NONE), 0);
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EFAULT);
+ igt_assert_eq(mprotect(engines, len, PROT_WRITE), 0);
+
+ /* Read-only so kernel cannot fill the data back. */
+ memset(engines, 0, len);
+ igt_assert_eq(mprotect(engines, len, PROT_READ), 0);
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EFAULT);
+
+ munmap(engines, 4096);
+}
+
+static bool
+has_engine(struct drm_i915_query_engine_info *engines,
+ unsigned class, unsigned instance)
+{
+ unsigned int i;
+
+ for (i = 0; i < engines->num_engines; i++) {
+ struct drm_i915_engine_info *engine =
+ (struct drm_i915_engine_info *)&engines->engines[i];
+
+ if (engine->engine.engine_class == class &&
+ engine->engine.engine_instance == instance)
+ return true;
+ }
+
+ return false;
+}
+
+static void engines(int fd)
+{
+ struct drm_i915_query_engine_info *engines;
+ struct drm_i915_query_item item;
+ unsigned int len, i;
+
+ engines = malloc(4096);
+ igt_assert(engines);
+
+ /* Query required buffer length. */
+ memset(engines, 0, 4096);
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length >= 0);
+ igt_assert(item.length <= 4096);
+ len = item.length;
+
+ /* Check length larger than required works and reports same length. */
+ memset(engines, 0, 4096);
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = 4096;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, len);
+
+ /* Actual query. */
+ memset(engines, 0, 4096);
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_ENGINE_INFO;
+ item.length = len;
+ item.data_ptr = to_user_pointer(engines);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, len);
+
+ /* Every GPU has at least one engine. */
+ igt_assert(engines->num_engines > 0);
+
+ /* MBZ fields. */
+ igt_assert_eq(engines->rsvd[0], 0);
+ igt_assert_eq(engines->rsvd[1], 0);
+ igt_assert_eq(engines->rsvd[2], 0);
+
+ /* Check results match the legacy GET_PARAM (where we can). */
+ for (i = 0; i < engines->num_engines; i++) {
+ struct drm_i915_engine_info *engine =
+ (struct drm_i915_engine_info *)&engines->engines[i];
+
+ igt_debug("%u: class=%u instance=%u flags=%llx capabilities=%llx\n",
+ i,
+ engine->engine.engine_class,
+ engine->engine.engine_instance,
+ engine->flags,
+ engine->capabilities);
+
+ /* MBZ fields. */
+ igt_assert_eq(engine->rsvd0, 0);
+ igt_assert_eq(engine->rsvd1[0], 0);
+ igt_assert_eq(engine->rsvd1[1], 0);
+
+ switch (engine->engine.engine_class) {
+ case I915_ENGINE_CLASS_RENDER:
+ /* Will be tested later. */
+ break;
+ case I915_ENGINE_CLASS_COPY:
+ igt_assert(gem_has_blt(fd));
+ break;
+ case I915_ENGINE_CLASS_VIDEO:
+ switch (engine->engine.engine_instance) {
+ case 0:
+ igt_assert(gem_has_bsd(fd));
+ break;
+ case 1:
+ igt_assert(gem_has_bsd2(fd));
+ break;
+ }
+ break;
+ case I915_ENGINE_CLASS_VIDEO_ENHANCE:
+ igt_assert(gem_has_vebox(fd));
+ break;
+ default:
+ igt_assert(0);
+ }
+ }
+
+ /* Reverse check to the above - all GET_PARAM engines are present. */
+ igt_assert(has_engine(engines, I915_ENGINE_CLASS_RENDER, 0));
+ if (gem_has_blt(fd))
+ igt_assert(has_engine(engines, I915_ENGINE_CLASS_COPY, 0));
+ if (gem_has_bsd(fd))
+ igt_assert(has_engine(engines, I915_ENGINE_CLASS_VIDEO, 0));
+ if (gem_has_bsd2(fd))
+ igt_assert(has_engine(engines, I915_ENGINE_CLASS_VIDEO, 1));
+ if (gem_has_vebox(fd))
+ igt_assert(has_engine(engines, I915_ENGINE_CLASS_VIDEO_ENHANCE,
+ 0));
+
+ free(engines);
+}
+
igt_main
{
int fd = -1;
@@ -530,6 +765,18 @@ igt_main
test_query_topology_known_pci_ids(fd, devid);
}
+ igt_subtest_group {
+ igt_fixture {
+ igt_require(query_engine_info_supported(fd));
+ }
+
+ igt_subtest("engine-info-invalid")
+ engines_invalid(fd);
+
+ igt_subtest("engine-info")
+ engines(fd);
+ }
+
igt_fixture {
close(fd);
}
diff --git a/tests/intel-ci/blacklist.txt b/tests/intel-ci/blacklist.txt
index fb182663..2a5893ce 100644
--- a/tests/intel-ci/blacklist.txt
+++ b/tests/intel-ci/blacklist.txt
@@ -24,6 +24,7 @@ igt@gem_concurrent_blit(@.*)?
igt@gem_cs_prefetch(@.*)?
igt@gem_ctx_create@(?!.*basic).*
igt@gem_ctx_exec@(?!.*basic).*
+igt@gem_ctx_shared@*exhaust*
igt@gem_ctx_switch@(?!.*basic).*
igt@gem_ctx_thrash(@.*)?
igt@gem_evict_alignment(@.*)?
diff --git a/tests/intel-ci/fast-feedback.testlist b/tests/intel-ci/fast-feedback.testlist
index 40475b1a..58e6b5c5 100644
--- a/tests/intel-ci/fast-feedback.testlist
+++ b/tests/intel-ci/fast-feedback.testlist
@@ -5,7 +5,7 @@ igt@debugfs_test@read_all_entries
igt@gem_basic@bad-close
igt@gem_basic@create-close
igt@gem_basic@create-fd-close
-igt@gem_busy@basic-busy-default
+igt@gem_busy@busy-all
igt@gem_close_race@basic-process
igt@gem_close_race@basic-threads
igt@gem_cpu_reloc@basic
@@ -15,27 +15,7 @@ igt@gem_ctx_exec@basic
igt@gem_ctx_param@basic
igt@gem_ctx_param@basic-default
igt@gem_ctx_switch@basic-default
-igt@gem_exec_basic@basic-blt
-igt@gem_exec_basic@basic-bsd
-igt@gem_exec_basic@basic-bsd1
-igt@gem_exec_basic@basic-bsd2
-igt@gem_exec_basic@basic-default
-igt@gem_exec_basic@basic-render
-igt@gem_exec_basic@basic-vebox
-igt@gem_exec_basic@gtt-blt
-igt@gem_exec_basic@gtt-bsd
-igt@gem_exec_basic@gtt-bsd1
-igt@gem_exec_basic@gtt-bsd2
-igt@gem_exec_basic@gtt-default
-igt@gem_exec_basic@gtt-render
-igt@gem_exec_basic@gtt-vebox
-igt@gem_exec_basic@readonly-blt
-igt@gem_exec_basic@readonly-bsd
-igt@gem_exec_basic@readonly-bsd1
-igt@gem_exec_basic@readonly-bsd2
-igt@gem_exec_basic@readonly-default
-igt@gem_exec_basic@readonly-render
-igt@gem_exec_basic@readonly-vebox
+igt@gem_exec_basic@basic-all
igt@gem_exec_create@basic
igt@gem_exec_fence@basic-busy-default
igt@gem_exec_fence@basic-wait-default
@@ -74,13 +54,6 @@ igt@gem_exec_reloc@basic-write-gtt-active
igt@gem_exec_reloc@basic-write-read-active
igt@gem_exec_reloc@basic-softpin
igt@gem_exec_store@basic-all
-igt@gem_exec_store@basic-blt
-igt@gem_exec_store@basic-bsd
-igt@gem_exec_store@basic-bsd1
-igt@gem_exec_store@basic-bsd2
-igt@gem_exec_store@basic-default
-igt@gem_exec_store@basic-render
-igt@gem_exec_store@basic-vebox
igt@gem_exec_suspend@basic
igt@gem_exec_suspend@basic-s3
igt@gem_exec_suspend@basic-s4-devices
diff --git a/tests/kms_available_modes_crc.c b/tests/kms_available_modes_crc.c
index 50b5522a..07772767 100644
--- a/tests/kms_available_modes_crc.c
+++ b/tests/kms_available_modes_crc.c
@@ -127,7 +127,6 @@ static const struct {
BYTES_PP_4 = 4} bpp;
uint32_t value;
} fillers[] = {
- { DRM_FORMAT_C8, 0, BYTES_PP_1, 0xff},
{ DRM_FORMAT_XBGR2101010, 0, BYTES_PP_4, 0xffffffff},
{ 0, 0, 0, 0 }
};
diff --git a/tests/kms_big_fb.c b/tests/kms_big_fb.c
new file mode 100644
index 00000000..c3498c67
--- /dev/null
+++ b/tests/kms_big_fb.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+IGT_TEST_DESCRIPTION("Test big framebuffers");
+
+typedef struct {
+ int drm_fd;
+ uint32_t devid;
+ igt_display_t display;
+ enum pipe pipe;
+ igt_output_t *output;
+ igt_plane_t *plane;
+ igt_pipe_crc_t *pipe_crc;
+ struct igt_fb small_fb, big_fb;
+ uint32_t format;
+ uint64_t modifier;
+ int width, height;
+ igt_rotation_t rotation;
+ int max_fb_width, max_fb_height;
+ int big_fb_width, big_fb_height;
+ uint64_t ram_size, aper_size, mappable_size;
+ igt_render_copyfunc_t render_copy;
+ drm_intel_bufmgr *bufmgr;
+ struct intel_batchbuffer *batch;
+} data_t;
+
+static void init_buf(data_t *data,
+ struct igt_buf *buf,
+ const struct igt_fb *fb,
+ const char *name)
+{
+ igt_assert_eq(fb->offsets[0], 0);
+
+ buf->bo = gem_handle_to_libdrm_bo(data->bufmgr, data->drm_fd,
+ name, fb->gem_handle);
+ buf->tiling = igt_fb_mod_to_tiling(fb->modifier);
+ buf->stride = fb->strides[0];
+ buf->bpp = fb->plane_bpp[0];
+ buf->size = fb->size;
+}
+
+static void fini_buf(struct igt_buf *buf)
+{
+ drm_intel_bo_unreference(buf->bo);
+}
+
+static void copy_pattern(data_t *data,
+ struct igt_fb *dst_fb, int dx, int dy,
+ struct igt_fb *src_fb, int sx, int sy,
+ int w, int h)
+{
+ struct igt_buf src = {}, dst = {};
+
+ init_buf(data, &src, src_fb, "big fb src");
+ init_buf(data, &dst, dst_fb, "big fb dst");
+
+ gem_set_domain(data->drm_fd, dst_fb->gem_handle,
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ gem_set_domain(data->drm_fd, src_fb->gem_handle,
+ I915_GEM_DOMAIN_GTT, 0);
+
+ /*
+ * We expect the kernel to limit the max fb
+ * size/stride to something that can still
+ * be rendered with the blitter/render engine.
+ */
+ if (data->render_copy) {
+ data->render_copy(data->batch, NULL, &src, sx, sy, w, h, &dst, dx, dy);
+ } else {
+ w = min(w, src_fb->width - sx);
+ w = min(w, dst_fb->width - dx);
+
+ h = min(h, src_fb->height - sy);
+ h = min(h, dst_fb->height - dy);
+
+ intel_blt_copy(data->batch, src.bo, sx, sy, src.stride,
+ dst.bo, dx, dy, dst.stride, w, h, dst.bpp);
+ }
+
+ fini_buf(&dst);
+ fini_buf(&src);
+}
+
+static void generate_pattern(data_t *data,
+ struct igt_fb *fb,
+ int w, int h)
+{
+ struct igt_fb pat_fb;
+
+ igt_create_pattern_fb(data->drm_fd, w, h,
+ data->format, data->modifier,
+ &pat_fb);
+
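+ /*
+ * Tile copies of the pattern across the big fb. The placement step
+ * grows by one after each copy, so the resulting contents are not
+ * perfectly periodic.
+ */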
+ for (int y = 0; y < fb->height; y += h) {
+ for (int x = 0; x < fb->width; x += w) {
+ copy_pattern(data, fb, x, y,
+ &pat_fb, 0, 0,
+ pat_fb.width, pat_fb.height);
+ w++;
+ h++;
+ }
+ }
+
+ igt_remove_fb(data->drm_fd, &pat_fb);
+}
+
+static bool size_ok(data_t *data, uint64_t size)
+{
+ /*
+ * The kernel limits scanout to the
+ * mappable portion of ggtt on gmch platforms.
+ */
+ if ((intel_gen(data->devid) < 5 ||
+ IS_VALLEYVIEW(data->devid) ||
+ IS_CHERRYVIEW(data->devid)) &&
+ size > data->mappable_size / 2)
+ return false;
+
+ /*
+ * Limit the big fb size to at most half the RAM or half
+ * the aperture size. Could go a bit higher I suppose since
+ * we shouldn't need more than one big fb at a time.
+ */
+ if (size > data->ram_size / 2 || size > data->aper_size / 2)
+ return false;
+
+ return true;
+}
+
+
+static void max_fb_size(data_t *data, int *width, int *height,
+ uint32_t format, uint64_t modifier)
+{
+ unsigned int stride;
+ uint64_t size;
+ int i = 0;
+
+ *width = data->max_fb_width;
+ *height = data->max_fb_height;
+
+ /* max fence stride is only 8k bytes on gen3 */
+ if (intel_gen(data->devid) < 4 &&
+ format == DRM_FORMAT_XRGB8888)
+ *width = min(*width, 8192 / 4);
+
+ igt_calc_fb_size(data->drm_fd, *width, *height,
+ format, modifier, &size, &stride);
+
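+ /*
+ * Shrink the fb alternately in each dimension until it fits within
+ * the RAM/aperture/mappable limits checked by size_ok().
+ */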
+ while (!size_ok(data, size)) {
+ if (i++ & 1)
+ *width >>= 1;
+ else
+ *height >>= 1;
+
+ igt_calc_fb_size(data->drm_fd, *width, *height,
+ format, modifier, &size, &stride);
+ }
+
+ igt_info("Max usable framebuffer size for format "IGT_FORMAT_FMT" / modifier 0x%"PRIx64": %dx%d\n",
+ IGT_FORMAT_ARGS(format), modifier,
+ *width, *height);
+}
+
+static void prep_fb(data_t *data)
+{
+ if (data->big_fb.fb_id)
+ return;
+
+ igt_create_fb(data->drm_fd,
+ data->big_fb_width, data->big_fb_height,
+ data->format, data->modifier,
+ &data->big_fb);
+
+ generate_pattern(data, &data->big_fb, 640, 480);
+}
+
+static void cleanup_fb(data_t *data)
+{
+ igt_remove_fb(data->drm_fd, &data->big_fb);
+ data->big_fb.fb_id = 0;
+}
+
+static void set_c8_lut(data_t *data)
+{
+ igt_pipe_t *pipe = &data->display.pipes[data->pipe];
+ struct drm_color_lut *lut;
+ int i, lut_size = 256;
+
+ lut = calloc(lut_size, sizeof(lut[0]));
+
+ /* igt_fb uses RGB332 for C8 */
+ for (i = 0; i < lut_size; i++) {
+ lut[i].red = ((i & 0xe0) >> 5) * 0xffff / 0x7;
+ lut[i].green = ((i & 0x1c) >> 2) * 0xffff / 0x7;
+ lut[i].blue = ((i & 0x03) >> 0) * 0xffff / 0x3;
+ }
+
+ igt_pipe_obj_replace_prop_blob(pipe, IGT_CRTC_GAMMA_LUT, lut,
+ lut_size * sizeof(lut[0]));
+
+ free(lut);
+}
+
+static void unset_lut(data_t *data)
+{
+ igt_pipe_t *pipe = &data->display.pipes[data->pipe];
+
+ igt_pipe_obj_replace_prop_blob(pipe, IGT_CRTC_GAMMA_LUT, NULL, 0);
+}
+
+static bool test_plane(data_t *data)
+{
+ igt_plane_t *plane = data->plane;
+ struct igt_fb *small_fb = &data->small_fb;
+ struct igt_fb *big_fb = &data->big_fb;
+ int w = data->big_fb_width - small_fb->width;
+ int h = data->big_fb_height - small_fb->height;
+ struct {
+ int x, y;
+ } coords[] = {
+ /* bunch of coordinates pulled out of thin air */
+ { 0, 0, },
+ { w * 4 / 7, h / 5, },
+ { w * 3 / 7, h / 3, },
+ { w / 2, h / 2, },
+ { w / 3, h * 3 / 4, },
+ { w, h, },
+ };
+
+ if (!igt_plane_has_format_mod(plane, data->format, data->modifier))
+ return false;
+
+ if (data->rotation != IGT_ROTATION_0 &&
+ !igt_plane_has_prop(plane, IGT_PLANE_ROTATION))
+ return false;
+
+ /* FIXME need atomic on i965/g4x */
+ if (data->rotation != IGT_ROTATION_0 &&
+ data->rotation != IGT_ROTATION_180 &&
+ !data->display.is_atomic)
+ return false;
+
+ if (igt_plane_has_prop(plane, IGT_PLANE_ROTATION))
+ igt_plane_set_rotation(plane, data->rotation);
+ igt_plane_set_position(plane, 0, 0);
+
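+ /*
+ * For each offset: copy that region of the big fb into the small fb,
+ * display each in turn, and require the two CRCs to match.
+ */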
+ for (int i = 0; i < ARRAY_SIZE(coords); i++) {
+ igt_crc_t small_crc, big_crc;
+ int x = coords[i].x;
+ int y = coords[i].y;
+
+ /* Hardware limitation */
+ if (data->format == DRM_FORMAT_RGB565 &&
+ (data->rotation == IGT_ROTATION_90 ||
+ data->rotation == IGT_ROTATION_270)) {
+ x &= ~1;
+ y &= ~1;
+ }
+
+ igt_plane_set_fb(plane, small_fb);
+ igt_plane_set_size(plane, data->width, data->height);
+
+ /*
+ * Try to check that the rotation+format+modifier
+ * combo is supported.
+ */
+ if (i == 0 && data->display.is_atomic &&
+ igt_display_try_commit_atomic(&data->display,
+ DRM_MODE_ATOMIC_TEST_ONLY,
+ NULL) != 0) {
+ if (igt_plane_has_prop(plane, IGT_PLANE_ROTATION))
+ igt_plane_set_rotation(plane, IGT_ROTATION_0);
+ igt_plane_set_fb(plane, NULL);
+ return false;
+ }
+
+ /*
+ * To speed up skips we delay the big fb creation until
+ * the above rotation related check has been performed.
+ */
+ prep_fb(data);
+
+ /*
+ * Make a 1:1 copy of the desired part of the big fb
+ * rather than try to render the same pattern (translated
+ * accordingly) again via cairo. Something in cairo's
+ * rendering pipeline introduces slight differences into
+ * the result if we try that, and so the crc will not match.
+ */
+ copy_pattern(data, small_fb, 0, 0, big_fb, x, y,
+ small_fb->width, small_fb->height);
+
+ igt_display_commit2(&data->display, data->display.is_atomic ?
+ COMMIT_ATOMIC : COMMIT_UNIVERSAL);
+
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &small_crc);
+
+ igt_plane_set_fb(plane, big_fb);
+ igt_fb_set_position(big_fb, plane, x, y);
+ igt_fb_set_size(big_fb, plane, small_fb->width, small_fb->height);
+ igt_plane_set_size(plane, data->width, data->height);
+ igt_display_commit2(&data->display, data->display.is_atomic ?
+ COMMIT_ATOMIC : COMMIT_UNIVERSAL);
+
+ igt_pipe_crc_collect_crc(data->pipe_crc, &big_crc);
+
+ igt_plane_set_fb(plane, NULL);
+
+ igt_assert_crc_equal(&big_crc, &small_crc);
+ }
+
+ return true;
+}
+
+static bool test_pipe(data_t *data)
+{
+ drmModeModeInfo *mode;
+ igt_plane_t *primary;
+ int width, height;
+ bool ret = false;
+
+ if (data->format == DRM_FORMAT_C8 &&
+ !igt_pipe_obj_has_prop(&data->display.pipes[data->pipe],
+ IGT_CRTC_GAMMA_LUT))
+ return false;
+
+ mode = igt_output_get_mode(data->output);
+
+ data->width = mode->hdisplay;
+ data->height = mode->vdisplay;
+
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+ if (data->rotation == IGT_ROTATION_90 ||
+ data->rotation == IGT_ROTATION_270)
+ igt_swap(width, height);
+
+ igt_create_color_fb(data->drm_fd, width, height,
+ data->format, data->modifier,
+ 0, 1, 0, &data->small_fb);
+
+ igt_output_set_pipe(data->output, data->pipe);
+
+ primary = igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_PRIMARY);
+ igt_plane_set_fb(primary, NULL);
+
+ if (!data->display.is_atomic) {
+ struct igt_fb fb;
+
+ igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_LINEAR,
+ &fb);
+
+ /* legacy setcrtc needs an fb */
+ igt_plane_set_fb(primary, &fb);
+ igt_display_commit2(&data->display, COMMIT_LEGACY);
+
+ igt_plane_set_fb(primary, NULL);
+ igt_display_commit2(&data->display, COMMIT_UNIVERSAL);
+
+ igt_remove_fb(data->drm_fd, &fb);
+ }
+
+ if (data->format == DRM_FORMAT_C8)
+ set_c8_lut(data);
+
+ igt_display_commit2(&data->display, data->display.is_atomic ?
+ COMMIT_ATOMIC : COMMIT_UNIVERSAL);
+
+ data->pipe_crc = igt_pipe_crc_new(data->drm_fd, data->pipe,
+ INTEL_PIPE_CRC_SOURCE_AUTO);
+
+ for_each_plane_on_pipe(&data->display, data->pipe, data->plane) {
+ ret = test_plane(data);
+ if (ret)
+ break;
+ }
+
+ if (data->format == DRM_FORMAT_C8)
+ unset_lut(data);
+
+ igt_pipe_crc_free(data->pipe_crc);
+
+ igt_output_set_pipe(data->output, PIPE_ANY);
+
+ igt_remove_fb(data->drm_fd, &data->small_fb);
+
+ return ret;
+}
+
+static void test_scanout(data_t *data)
+{
+ max_fb_size(data, &data->big_fb_width, &data->big_fb_height,
+ data->format, data->modifier);
+
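+ /* Only the first valid pipe/output combination is attempted. */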
+ for_each_pipe_with_valid_output(&data->display, data->pipe, data->output) {
+ if (test_pipe(data))
+ return;
+ break;
+ }
+
+ igt_skip("unsupported configuration\n");
+}
+
+static void
+test_size_overflow(data_t *data)
+{
+ uint32_t fb_id;
+ uint32_t bo;
+ uint32_t offsets[4] = {};
+ uint32_t strides[4] = { 256*1024, };
+ int ret;
+
+ igt_require(igt_display_has_format_mod(&data->display,
+ DRM_FORMAT_XRGB8888,
+ data->modifier));
+
+ /*
+ * Try to hit a specific integer overflow in i915 fb size
+ * calculations. 256k * 16k == 1<<32 which is checked
+ * against the bo size. The check should fail on account
+ * of the bo being smaller, but due to the overflow the
+ * computed fb size is 0 and thus the check never trips.
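+ * (Arithmetic: 256k * 16k = 2^18 * 2^14 = 2^32, which wraps to zero
+ * in a 32-bit size.)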
+ */
+ igt_require(data->max_fb_width >= 16383 &&
+ data->max_fb_height >= 16383);
+
+ bo = gem_create(data->drm_fd, (1ULL << 32) - 4096);
+ igt_require(bo);
+
+ ret = __kms_addfb(data->drm_fd, bo,
+ 16383, 16383,
+ DRM_FORMAT_XRGB8888,
+ data->modifier,
+ strides, offsets, 1,
+ DRM_MODE_FB_MODIFIERS, &fb_id);
+
+ igt_assert_neq(ret, 0);
+
+ gem_close(data->drm_fd, bo);
+}
+
+static void
+test_size_offset_overflow(data_t *data)
+{
+ uint32_t fb_id;
+ uint32_t bo;
+ uint32_t offsets[4] = {};
+ uint32_t strides[4] = { 8192, };
+ int ret;
+
+ igt_require(igt_display_has_format_mod(&data->display,
+ DRM_FORMAT_NV12,
+ data->modifier));
+
+ /*
+ * Try to hit a specific integer overflow in i915 fb size
+ * calculations. This time it's offsets[1] + the tile
+ * aligned chroma plane size that overflows and
+ * incorrectly passes the bo size check.
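+ * (offsets[1] = 2^32 - 8192*4096 = 2^32 - 2^25; the tile-aligned
+ * chroma plane size brings the sum to 2^32, which wraps to 0.)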
+ */
+ igt_require(igt_display_has_format_mod(&data->display,
+ DRM_FORMAT_NV12,
+ data->modifier));
+
+ bo = gem_create(data->drm_fd, (1ULL << 32) - 4096);
+ igt_require(bo);
+
+ offsets[0] = 0;
+ offsets[1] = (1ULL << 32) - 8192 * 4096;
+
+ ret = __kms_addfb(data->drm_fd, bo,
+ 8192, 8188,
+ DRM_FORMAT_NV12,
+ data->modifier,
+ strides, offsets, 1,
+ DRM_MODE_FB_MODIFIERS, &fb_id);
+ igt_assert_neq(ret, 0);
+
+ gem_close(data->drm_fd, bo);
+}
+
+static int rmfb(int fd, uint32_t id)
+{
+ int err;
+
+ err = 0;
+ if (igt_ioctl(fd, DRM_IOCTL_MODE_RMFB, &id))
+ err = -errno;
+
+ errno = 0;
+ return err;
+}
+
+static void
+test_addfb(data_t *data)
+{
+ uint64_t size;
+ uint32_t fb_id;
+ uint32_t bo;
+ uint32_t offsets[4] = {};
+ uint32_t strides[4] = {};
+ uint32_t format;
+ int ret;
+
+ /*
+ * gen3 max tiled stride is 8k bytes, but
+ * max fb size of 4k pixels, hence we can't test
+ * with 32bpp and must use 16bpp instead.
+ */
+ if (intel_gen(data->devid) == 3)
+ format = DRM_FORMAT_RGB565;
+ else
+ format = DRM_FORMAT_XRGB8888;
+
+ igt_require(igt_display_has_format_mod(&data->display,
+ format, data->modifier));
+
+ igt_calc_fb_size(data->drm_fd,
+ data->max_fb_width,
+ data->max_fb_height,
+ format, data->modifier,
+ &size, &strides[0]);
+
+ bo = gem_create(data->drm_fd, size);
+ igt_require(bo);
+
+ if (intel_gen(data->devid) < 4)
+ gem_set_tiling(data->drm_fd, bo,
+ igt_fb_mod_to_tiling(data->modifier), strides[0]);
+
+ ret = __kms_addfb(data->drm_fd, bo,
+ data->max_fb_width,
+ data->max_fb_height,
+ format, data->modifier,
+ strides, offsets, 1,
+ DRM_MODE_FB_MODIFIERS, &fb_id);
+ igt_assert_eq(ret, 0);
+
+ rmfb(data->drm_fd, fb_id);
+ gem_close(data->drm_fd, bo);
+}
+
+static data_t data;
+
+static const struct {
+ uint64_t modifier;
+ const char *name;
+} modifiers[] = {
+ { DRM_FORMAT_MOD_LINEAR, "linear", },
+ { I915_FORMAT_MOD_X_TILED, "x-tiled", },
+ { I915_FORMAT_MOD_Y_TILED, "y-tiled", },
+ { I915_FORMAT_MOD_Yf_TILED, "yf-tiled", },
+};
+
+static const struct {
+ uint32_t format;
+ uint8_t bpp;
+} formats[] = {
+ { DRM_FORMAT_C8, 8, },
+ { DRM_FORMAT_RGB565, 16, },
+ { DRM_FORMAT_XRGB8888, 32, },
+ { DRM_FORMAT_XBGR16161616F, 64, },
+};
+
+static const struct {
+ igt_rotation_t rotation;
+ uint16_t angle;
+} rotations[] = {
+ { IGT_ROTATION_0, 0, },
+ { IGT_ROTATION_90, 90, },
+ { IGT_ROTATION_180, 180, },
+ { IGT_ROTATION_270, 270, },
+};
+
+igt_main
+{
+ igt_fixture {
+ drmModeResPtr res;
+
+ igt_skip_on_simulation();
+
+ data.drm_fd = drm_open_driver_master(DRIVER_INTEL);
+
+ igt_require(is_i915_device(data.drm_fd));
+
+ data.devid = intel_get_drm_devid(data.drm_fd);
+
+ kmstest_set_vt_graphics_mode();
+
+ igt_require_pipe_crc(data.drm_fd);
+ igt_display_require(&data.display, data.drm_fd);
+
+ res = drmModeGetResources(data.drm_fd);
+ igt_assert(res);
+
+ data.max_fb_width = res->max_width;
+ data.max_fb_height = res->max_height;
+
+ drmModeFreeResources(res);
+
+ igt_info("Max driver framebuffer size %dx%d\n",
+ data.max_fb_width, data.max_fb_height);
+
+ data.ram_size = intel_get_total_ram_mb() << 20;
+ data.aper_size = gem_aperture_size(data.drm_fd);
+ data.mappable_size = gem_mappable_aperture_size();
+
+ igt_info("RAM: %"PRIu64" MiB, GPU address space: %"PRId64" MiB, GGTT mappable size: %"PRId64" MiB\n",
+ data.ram_size >> 20, data.aper_size >> 20,
+ data.mappable_size >> 20);
+
+ /*
+ * Gen3 render engine is limited to 2kx2k, whereas
+ * the display engine can do 4kx4k. Use the blitter
+ * on gen3 to avoid exceeding the render engine limits.
+ * On gen2 we could use either, but let's go for the
+ * blitter there as well.
+ */
+ if (intel_gen(data.devid) >= 4)
+ data.render_copy = igt_get_render_copyfunc(data.devid);
+
+ data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
+ data.batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
+ }
+
+ /*
+ * Skip linear as it doesn't hit the overflow we want
+ * on account of the tile height being effectively one,
+ * and thus the kernel rounding up to the next tile
+ * height won't do anything.
+ */
+ for (int i = 1; i < ARRAY_SIZE(modifiers); i++) {
+ igt_subtest_f("%s-addfb-size-overflow",
+ modifiers[i].name) {
+ data.modifier = modifiers[i].modifier;
+ test_size_overflow(&data);
+ }
+ }
+
+ for (int i = 1; i < ARRAY_SIZE(modifiers); i++) {
+ igt_subtest_f("%s-addfb-size-offset-overflow",
+ modifiers[i].name) {
+ data.modifier = modifiers[i].modifier;
+ test_size_offset_overflow(&data);
+ }
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(modifiers); i++) {
+ igt_subtest_f("%s-addfb", modifiers[i].name) {
+ data.modifier = modifiers[i].modifier;
+
+ test_addfb(&data);
+ }
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(modifiers); i++) {
+ data.modifier = modifiers[i].modifier;
+
+ for (int j = 0; j < ARRAY_SIZE(formats); j++) {
+ data.format = formats[j].format;
+
+ for (int k = 0; k < ARRAY_SIZE(rotations); k++) {
+ data.rotation = rotations[k].rotation;
+
+ igt_subtest_f("%s-%dbpp-rotate-%d", modifiers[i].name,
+ formats[j].bpp, rotations[k].angle) {
+ igt_require(data.format == DRM_FORMAT_C8 ||
+ igt_fb_supported_format(data.format));
+ igt_require(igt_display_has_format_mod(&data.display, data.format, data.modifier));
+ test_scanout(&data);
+ }
+ }
+
+ igt_fixture
+ cleanup_fb(&data);
+ }
+ }
+
+ igt_fixture {
+ igt_display_fini(&data.display);
+
+ intel_batchbuffer_free(data.batch);
+ drm_intel_bufmgr_destroy(data.bufmgr);
+ }
+}
diff --git a/tests/kms_chamelium.c b/tests/kms_chamelium.c
index 714e5e06..378024d8 100644
--- a/tests/kms_chamelium.c
+++ b/tests/kms_chamelium.c
@@ -27,12 +27,22 @@
#include "config.h"
#include "igt.h"
#include "igt_vc4.h"
+#include "igt_edid.h"
+#include "igt_eld.h"
#include <fcntl.h>
#include <pthread.h>
#include <string.h>
#include <stdatomic.h>
+enum test_edid {
+ TEST_EDID_BASE,
+ TEST_EDID_ALT,
+ TEST_EDID_HDMI_AUDIO,
+ TEST_EDID_DP_AUDIO,
+};
+#define TEST_EDID_COUNT 4
+
typedef struct {
struct chamelium *chamelium;
struct chamelium_port **ports;
@@ -41,8 +51,7 @@ typedef struct {
int drm_fd;
- int edid_id;
- int alt_edid_id;
+ struct chamelium_edid *edids[TEST_EDID_COUNT];
} data_t;
#define HOTPLUG_TIMEOUT 20 /* seconds */
@@ -255,18 +264,26 @@ test_basic_hotplug(data_t *data, struct chamelium_port *port, int toggle_count)
igt_hpd_storm_reset(data->drm_fd);
}
+static const unsigned char *get_edid(enum test_edid edid);
+
+static void set_edid(data_t *data, struct chamelium_port *port,
+ enum test_edid edid)
+{
+ chamelium_port_set_edid(data->chamelium, port, data->edids[edid]);
+}
+
static void
-test_edid_read(data_t *data, struct chamelium_port *port,
- int edid_id, const unsigned char *edid)
+test_edid_read(data_t *data, struct chamelium_port *port, enum test_edid edid)
{
drmModePropertyBlobPtr edid_blob = NULL;
+ const unsigned char *raw_edid = get_edid(edid);
drmModeConnector *connector = chamelium_port_get_connector(
data->chamelium, port, false);
uint64_t edid_blob_id;
reset_state(data, port);
- chamelium_port_set_edid(data->chamelium, port, edid_id);
+ set_edid(data, port, edid);
chamelium_plug(data->chamelium, port);
wait_for_connector(data, port, DRM_MODE_CONNECTED);
@@ -278,17 +295,38 @@ test_edid_read(data_t *data, struct chamelium_port *port,
igt_assert(edid_blob = drmModeGetPropertyBlob(data->drm_fd,
edid_blob_id));
- igt_assert(memcmp(edid, edid_blob->data, EDID_LENGTH) == 0);
+ igt_assert(memcmp(raw_edid, edid_blob->data, EDID_LENGTH) == 0);
drmModeFreePropertyBlob(edid_blob);
drmModeFreeConnector(connector);
}
+/* Wait for a hotplug event and update *timeout with the time remaining. */
+static bool wait_for_hotplug(struct udev_monitor *mon, int *timeout)
+{
+ struct timespec start, end;
+ int elapsed;
+ bool detected;
+
+ igt_assert_eq(igt_gettime(&start), 0);
+ detected = igt_hotplug_detected(mon, *timeout);
+ igt_assert_eq(igt_gettime(&end), 0);
+
+ elapsed = igt_time_elapsed(&start, &end);
+ igt_assert_lte(0, elapsed);
+ *timeout = max(0, *timeout - elapsed);
+
+ return detected;
+}
+
static void
try_suspend_resume_hpd(data_t *data, struct chamelium_port *port,
enum igt_suspend_state state, enum igt_suspend_test test,
struct udev_monitor *mon, bool connected)
{
+ drmModeConnection target_state = connected ? DRM_MODE_DISCONNECTED :
+ DRM_MODE_CONNECTED;
+ int timeout = HOTPLUG_TIMEOUT;
int delay;
int p;
@@ -310,17 +348,29 @@ try_suspend_resume_hpd(data_t *data, struct chamelium_port *port,
}
igt_system_suspend_autoresume(state, test);
+ igt_assert(wait_for_hotplug(mon, &timeout));
- igt_assert(igt_hotplug_detected(mon, HOTPLUG_TIMEOUT));
if (port) {
- igt_assert_eq(reprobe_connector(data, port), connected ?
- DRM_MODE_DISCONNECTED : DRM_MODE_CONNECTED);
+ igt_assert_eq(reprobe_connector(data, port), target_state);
} else {
for (p = 0; p < data->port_count; p++) {
+ drmModeConnection current_state;
+
port = data->ports[p];
- igt_assert_eq(reprobe_connector(data, port), connected ?
- DRM_MODE_DISCONNECTED :
- DRM_MODE_CONNECTED);
+ /*
+ * There could be as many hotplug events sent by the
+ * driver as connectors we scheduled an HPD toggle on
+ * above, depending on timing. So if we're not seeing
+ * the expected connector state try to wait for an HPD
+ * event for each connector/port.
+ */
+ current_state = reprobe_connector(data, port);
+ if (p > 0 && current_state != target_state) {
+ igt_assert(wait_for_hotplug(mon, &timeout));
+ current_state = reprobe_connector(data, port);
+ }
+
+ igt_assert_eq(current_state, target_state);
}
port = NULL;
@@ -373,8 +423,8 @@ static void
test_suspend_resume_edid_change(data_t *data, struct chamelium_port *port,
enum igt_suspend_state state,
enum igt_suspend_test test,
- int edid_id,
- int alt_edid_id)
+ enum test_edid edid,
+ enum test_edid alt_edid)
{
struct udev_monitor *mon = igt_watch_hotplug();
bool link_status_failed[2][data->port_count];
@@ -387,7 +437,7 @@ test_suspend_resume_edid_change(data_t *data, struct chamelium_port *port,
igt_flush_hotplugs(mon);
/* First plug in the port */
- chamelium_port_set_edid(data->chamelium, port, edid_id);
+ set_edid(data, port, edid);
chamelium_plug(data->chamelium, port);
igt_assert(igt_hotplug_detected(mon, HOTPLUG_TIMEOUT));
@@ -397,7 +447,7 @@ test_suspend_resume_edid_change(data_t *data, struct chamelium_port *port,
* Change the edid before we suspend. On resume, the machine should
* notice the EDID change and fire a hotplug event.
*/
- chamelium_port_set_edid(data->chamelium, port, alt_edid_id);
+ set_edid(data, port, alt_edid);
get_connectors_link_status_failed(data, link_status_failed[0]);
@@ -414,8 +464,7 @@ test_suspend_resume_edid_change(data_t *data, struct chamelium_port *port,
}
static igt_output_t *
-prepare_output(data_t *data,
- struct chamelium_port *port, bool set_edid)
+prepare_output(data_t *data, struct chamelium_port *port, enum test_edid edid)
{
igt_display_t *display = &data->display;
igt_output_t *output;
@@ -428,10 +477,10 @@ prepare_output(data_t *data,
igt_require(res = drmModeGetResources(data->drm_fd));
/* The chamelium's default EDID has a lot of resolutions, way more then
- * we need to test
+ * we need to test. Additionally the default EDID doesn't support HDMI
+ * audio.
*/
- if (set_edid)
- chamelium_port_set_edid(data->chamelium, port, data->edid_id);
+ set_edid(data, port, edid);
chamelium_plug(data->chamelium, port);
wait_for_connector(data, port, DRM_MODE_CONNECTED);
@@ -616,7 +665,7 @@ static void test_display_one_mode(data_t *data, struct chamelium_port *port,
reset_state(data, port);
- output = prepare_output(data, port, true);
+ output = prepare_output(data, port, TEST_EDID_BASE);
connector = chamelium_port_get_connector(data->chamelium, port, false);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_assert(primary);
@@ -647,7 +696,7 @@ static void test_display_all_modes(data_t *data, struct chamelium_port *port,
reset_state(data, port);
- output = prepare_output(data, port, true);
+ output = prepare_output(data, port, TEST_EDID_BASE);
connector = chamelium_port_get_connector(data->chamelium, port, false);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_assert(primary);
@@ -682,7 +731,7 @@ test_display_frame_dump(data_t *data, struct chamelium_port *port)
reset_state(data, port);
- output = prepare_output(data, port, true);
+ output = prepare_output(data, port, TEST_EDID_BASE);
connector = chamelium_port_get_connector(data->chamelium, port, false);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_assert(primary);
@@ -725,45 +774,106 @@ test_display_frame_dump(data_t *data, struct chamelium_port *port)
/* A streak of 3 gives confidence that the signal is good. */
#define MIN_STREAK 3
-static int sampling_rates[] = {
+#define FLATLINE_AMPLITUDE 0.1 /* normalized, i.e. in [0, 1] */
+#define FLATLINE_AMPLITUDE_ACCURACY 0.001 /* ± 0.1 % of the full amplitude */
+#define FLATLINE_ALIGN_ACCURACY 0 /* number of samples */
+
+/* TODO: enable >48KHz rates, these are not reliable */
+static int test_sampling_rates[] = {
32000,
44100,
48000,
- 88200,
- 96000,
- 176400,
- 192000,
+ /* 88200, */
+ /* 96000, */
+ /* 176400, */
+ /* 192000, */
};
-static int sampling_rates_count = sizeof(sampling_rates) / sizeof(int);
+static int test_sampling_rates_count = sizeof(test_sampling_rates) / sizeof(int);
+/* Test frequencies (Hz): a sine signal will be generated for each.
+ *
+ * Depending on the sampling rate chosen, it might not be possible to properly
+ * detect the generated sine (see Nyquist–Shannon sampling theorem).
+ * Frequencies that can't be reliably detected will be automatically pruned in
+ * #audio_signal_add_frequency. For instance, the 80KHz frequency can only be
+ * tested with a 192KHz sampling rate.
+ */
static int test_frequencies[] = {
300,
600,
1200,
- 80000,
10000,
+ 80000,
};
static int test_frequencies_count = sizeof(test_frequencies) / sizeof(int);
+static const snd_pcm_format_t test_formats[] = {
+ SND_PCM_FORMAT_S16_LE,
+ SND_PCM_FORMAT_S24_LE,
+ SND_PCM_FORMAT_S32_LE,
+};
+
+static const size_t test_formats_count = sizeof(test_formats) / sizeof(test_formats[0]);
+
struct audio_state {
- struct audio_signal *signal;
+ struct alsa *alsa;
+ struct chamelium *chamelium;
+ struct chamelium_port *port;
+ struct chamelium_stream *stream;
+
+ /* The capture format is only available after capture has started. */
+ struct {
+ snd_pcm_format_t format;
+ int channels;
+ int rate;
+ } playback, capture;
+
+ char *name;
+ struct audio_signal *signal; /* for frequencies test only */
+ int channel_mapping[CHAMELIUM_MAX_AUDIO_CHANNELS];
+
+ size_t recv_pages;
+ int msec;
+
+ int dump_fd;
+ char *dump_path;
+
+ pthread_t thread;
atomic_bool run;
+ atomic_bool positive; /* for pulse test only */
};
-static int
-audio_output_callback(void *data, short *buffer, int frames)
+static void audio_state_init(struct audio_state *state, data_t *data,
+ struct alsa *alsa, struct chamelium_port *port,
+ snd_pcm_format_t format, int channels, int rate)
{
- struct audio_state *state = data;
+ memset(state, 0, sizeof(*state));
+ state->dump_fd = -1;
- audio_signal_fill(state->signal, buffer, frames);
+ state->alsa = alsa;
+ state->chamelium = data->chamelium;
+ state->port = port;
- return state->run ? 0 : -1;
+ state->playback.format = format;
+ state->playback.channels = channels;
+ state->playback.rate = rate;
+
+ alsa_configure_output(alsa, format, channels, rate);
+
+ state->stream = chamelium_stream_init();
+ igt_assert_f(state->stream,
+ "Failed to initialize Chamelium stream client\n");
}
-static void *
-run_audio_thread(void *data)
+static void audio_state_fini(struct audio_state *state)
+{
+ chamelium_stream_deinit(state->stream);
+ free(state->name);
+}
+
+static void *run_audio_thread(void *data)
{
struct alsa *alsa = data;
@@ -771,52 +881,176 @@ run_audio_thread(void *data)
return NULL;
}
-static bool
-do_test_display_audio(data_t *data, struct chamelium_port *port,
- struct alsa *alsa, int playback_channels,
- int playback_rate)
+static void audio_state_start(struct audio_state *state, const char *name)
{
- int ret, capture_rate, capture_channels, msec, freq, step;
- struct chamelium_audio_file *audio_file;
- struct chamelium_stream *stream;
+ int ret;
+ bool ok;
+ size_t i, j;
enum chamelium_stream_realtime_mode stream_mode;
- struct audio_signal *signal;
- int32_t *recv, *buf;
- double *channel;
- size_t i, j, streak, page_count;
- size_t recv_len, buf_len, buf_cap, buf_size, channel_len;
- bool ok, success;
char dump_suffix[64];
- char *dump_path = NULL;
- int dump_fd = -1;
- pthread_t thread;
- struct audio_state state = {};
- int channel_mapping[8], capture_chan;
- if (!alsa_test_output_configuration(alsa, playback_channels,
- playback_rate)) {
- igt_debug("Skipping test with sample rate %d Hz and %d channels "
- "because at least one of the selected output devices "
- "doesn't support this configuration\n",
- playback_rate, playback_channels);
- return false;
+ free(state->name);
+ state->name = strdup(name);
+ state->recv_pages = 0;
+ state->msec = 0;
+
+ igt_debug("Starting %s test with playback format %s, "
+ "sampling rate %d Hz and %d channels\n",
+ name, snd_pcm_format_name(state->playback.format),
+ state->playback.rate, state->playback.channels);
+
+ chamelium_start_capturing_audio(state->chamelium, state->port, false);
+
+ stream_mode = CHAMELIUM_STREAM_REALTIME_STOP_WHEN_OVERFLOW;
+ ok = chamelium_stream_dump_realtime_audio(state->stream, stream_mode);
+ igt_assert_f(ok, "Failed to start streaming audio capture\n");
+
+ /* Start playing audio */
+ state->run = true;
+ ret = pthread_create(&state->thread, NULL,
+ run_audio_thread, state->alsa);
+ igt_assert_f(ret == 0, "Failed to start audio playback thread\n");
+
+ /* The Chamelium device only supports this PCM format. */
+ state->capture.format = SND_PCM_FORMAT_S32_LE;
+
+ /* Only after we've started playing audio can we retrieve the
+ * capture format used by the Chamelium device. */
+ chamelium_get_audio_format(state->chamelium, state->port,
+ &state->capture.rate,
+ &state->capture.channels);
+ if (state->capture.rate == 0) {
+ igt_debug("Audio receiver doesn't indicate the capture "
+ "sampling rate, assuming it's %d Hz\n",
+ state->playback.rate);
+ state->capture.rate = state->playback.rate;
+ }
+
+ chamelium_get_audio_channel_mapping(state->chamelium, state->port,
+ state->channel_mapping);
+ /* Make sure we can capture all channels we send. */
+ for (i = 0; i < state->playback.channels; i++) {
+ ok = false;
+ for (j = 0; j < state->capture.channels; j++) {
+ if (state->channel_mapping[j] == i) {
+ ok = true;
+ break;
+ }
+ }
+ igt_assert_f(ok, "Cannot capture all channels\n");
+ }
+
+ if (igt_frame_dump_is_enabled()) {
+ snprintf(dump_suffix, sizeof(dump_suffix),
+ "capture-%s-%s-%dch-%dHz",
+ name, snd_pcm_format_name(state->playback.format),
+ state->playback.channels, state->playback.rate);
+
+ state->dump_fd = audio_create_wav_file_s32_le(dump_suffix,
+ state->capture.rate,
+ state->capture.channels,
+ &state->dump_path);
+ igt_assert_f(state->dump_fd >= 0,
+ "Failed to create audio dump file\n");
}
+}
+
+static void audio_state_receive(struct audio_state *state,
+ int32_t **recv, size_t *recv_len)
+{
+ bool ok;
+ size_t page_count;
+ size_t recv_size;
+
+ ok = chamelium_stream_receive_realtime_audio(state->stream,
+ &page_count,
+ recv, recv_len);
+ igt_assert_f(ok, "Failed to receive audio from stream server\n");
+
+ state->msec = state->recv_pages * *recv_len
+ / (double) state->capture.channels
+ / (double) state->capture.rate * 1000;
+ state->recv_pages++;
+
+ if (state->dump_fd >= 0) {
+ recv_size = *recv_len * sizeof(int32_t);
+ igt_assert_f(write(state->dump_fd, *recv, recv_size) == recv_size,
+ "Failed to write to audio dump file\n");
+ }
+}
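/*
 * Worked example for the bookkeeping above (illustrative numbers, not
 * from the patch): assuming a page of 2048 interleaved samples, 2
 * capture channels and a 48000 Hz capture rate, every page received
 * advances state->msec by 2048 / 2 / 48000 * 1000 ~= 21 ms.
 */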
- igt_debug("Testing with playback sampling rate %d Hz and %d channels\n",
- playback_rate, playback_channels);
- alsa_configure_output(alsa, playback_channels, playback_rate);
+static void audio_state_stop(struct audio_state *state, bool success)
+{
+ bool ok;
+ int ret;
+ struct chamelium_audio_file *audio_file;
- chamelium_start_capturing_audio(data->chamelium, port, false);
+ igt_debug("Stopping audio playback\n");
+ state->run = false;
+ ret = pthread_join(state->thread, NULL);
+ igt_assert_f(ret == 0, "Failed to join audio playback thread\n");
- stream = chamelium_stream_init();
- igt_assert(stream);
+ ok = chamelium_stream_stop_realtime_audio(state->stream);
+ igt_assert_f(ok, "Failed to stop streaming audio capture\n");
- stream_mode = CHAMELIUM_STREAM_REALTIME_STOP_WHEN_OVERFLOW;
- ok = chamelium_stream_dump_realtime_audio(stream, stream_mode);
- igt_assert(ok);
+ audio_file = chamelium_stop_capturing_audio(state->chamelium,
+ state->port);
+ if (audio_file) {
+ igt_debug("Audio file saved on the Chamelium in %s\n",
+ audio_file->path);
+ chamelium_destroy_audio_file(audio_file);
+ }
- signal = audio_signal_init(playback_channels, playback_rate);
- igt_assert(signal);
+ if (state->dump_fd >= 0) {
+ close(state->dump_fd);
+ state->dump_fd = -1;
+
+ if (success) {
+ /* Test succeeded, no need to keep the captured data */
+ unlink(state->dump_path);
+ } else
+ igt_debug("Saved captured audio data to %s\n",
+ state->dump_path);
+ free(state->dump_path);
+ state->dump_path = NULL;
+ }
+
+ igt_debug("Audio %s test result for format %s, sampling rate %d Hz "
+ "and %d channels: %s\n",
+ state->name, snd_pcm_format_name(state->playback.format),
+ state->playback.rate, state->playback.channels,
+ success ? "ALL GREEN" : "FAILED");
+}
+
+static int
+audio_output_frequencies_callback(void *data, void *buffer, int samples)
+{
+ struct audio_state *state = data;
+ double *tmp;
+ size_t len;
+
+ len = samples * state->playback.channels;
+ tmp = malloc(len * sizeof(double));
+ audio_signal_fill(state->signal, tmp, samples);
+ audio_convert_to(buffer, tmp, len, state->playback.format);
+ free(tmp);
+
+ return state->run ? 0 : -1;
+}
+
+static bool test_audio_frequencies(struct audio_state *state)
+{
+ int freq, step;
+ int32_t *recv, *buf;
+ double *channel;
+ size_t i, j, streak;
+ size_t recv_len, buf_len, buf_cap, channel_len;
+ bool success;
+ int capture_chan;
+
+ state->signal = audio_signal_init(state->playback.channels,
+ state->playback.rate);
+ igt_assert_f(state->signal, "Failed to initialize audio signal\n");
/* We'll choose different frequencies per channel to make sure they are
* independent from each other. To do so, we'll add a different offset
@@ -829,67 +1063,39 @@ do_test_display_audio(data_t *data, struct chamelium_port *port,
* later on. We cannot retrieve the capture rate before starting
* playing audio, so we don't really have the choice.
*/
- step = 2 * playback_rate / CAPTURE_SAMPLES;
+ step = 2 * state->playback.rate / CAPTURE_SAMPLES;
for (i = 0; i < test_frequencies_count; i++) {
- for (j = 0; j < playback_channels; j++) {
+ for (j = 0; j < state->playback.channels; j++) {
freq = test_frequencies[i] + j * step;
- audio_signal_add_frequency(signal, freq, j);
+ audio_signal_add_frequency(state->signal, freq, j);
}
}
- audio_signal_synthesize(signal);
+ audio_signal_synthesize(state->signal);
- state.signal = signal;
- state.run = true;
- alsa_register_output_callback(alsa, audio_output_callback, &state,
+ alsa_register_output_callback(state->alsa,
+ audio_output_frequencies_callback, state,
PLAYBACK_SAMPLES);
- /* Start playing audio */
- ret = pthread_create(&thread, NULL, run_audio_thread, alsa);
- igt_assert(ret == 0);
-
- /* Only after we've started playing audio, we can retrieve the capture
- * format used by the Chamelium device. */
- chamelium_get_audio_format(data->chamelium, port,
- &capture_rate, &capture_channels);
- if (capture_rate == 0) {
- igt_debug("Audio receiver doesn't indicate the capture "
- "sampling rate, assuming it's %d Hz\n", playback_rate);
- capture_rate = playback_rate;
- } else
- igt_assert(capture_rate == playback_rate);
-
- chamelium_get_audio_channel_mapping(data->chamelium, port,
- channel_mapping);
- /* Make sure we can capture all channels we send. */
- for (i = 0; i < playback_channels; i++) {
- ok = false;
- for (j = 0; j < capture_channels; j++) {
- if (channel_mapping[j] == i) {
- ok = true;
- break;
- }
- }
- igt_assert(ok);
- }
+ audio_state_start(state, "frequencies");
- if (igt_frame_dump_is_enabled()) {
- snprintf(dump_suffix, sizeof(dump_suffix), "capture-%dch-%d",
- playback_channels, playback_rate);
-
- dump_fd = audio_create_wav_file_s32_le(dump_suffix,
- capture_rate,
- capture_channels,
- &dump_path);
- igt_assert(dump_fd >= 0);
- }
+ igt_assert_f(state->capture.rate == state->playback.rate,
+ "Capture rate (%dHz) doesn't match playback rate (%dHz)\n",
+ state->capture.rate, state->playback.rate);
/* Needs to be a multiple of 128, because that's the number of samples
* we get per channel each time we receive an audio page from the
- * Chamelium device. */
+ * Chamelium device.
+ *
+ * Additionally, this value needs to be high enough to guarantee we
+ * capture a full period of each sine we generate. If we capture 2048
+ * samples at a 192KHz sampling rate, we get a full period for any sine
+ * above ~94Hz. For lower sampling rates, the capture duration will be
+ * longer.
+ */
channel_len = CAPTURE_SAMPLES;
channel = malloc(sizeof(double) * channel_len);
- buf_cap = capture_channels * channel_len;
+ buf_cap = state->capture.channels * channel_len;
buf = malloc(sizeof(int32_t) * buf_cap);
buf_len = 0;
@@ -898,13 +1104,8 @@ do_test_display_audio(data_t *data, struct chamelium_port *port,
success = false;
streak = 0;
- msec = 0;
- i = 0;
- while (!success && msec < AUDIO_TIMEOUT) {
- ok = chamelium_stream_receive_realtime_audio(stream,
- &page_count,
- &recv, &recv_len);
- igt_assert(ok);
+ while (!success && state->msec < AUDIO_TIMEOUT) {
+ audio_state_receive(state, &recv, &recv_len);
memcpy(&buf[buf_len], recv, recv_len * sizeof(int32_t));
buf_len += recv_len;
@@ -913,26 +1114,21 @@ do_test_display_audio(data_t *data, struct chamelium_port *port,
continue;
igt_assert(buf_len == buf_cap);
- if (dump_fd >= 0) {
- buf_size = buf_len * sizeof(int32_t);
- igt_assert(write(dump_fd, buf, buf_size) == buf_size);
- }
-
- msec = i * channel_len / (double) capture_rate * 1000;
- igt_debug("Detecting audio signal, t=%d msec\n", msec);
+ igt_debug("Detecting audio signal, t=%d msec\n", state->msec);
- for (j = 0; j < playback_channels; j++) {
- capture_chan = channel_mapping[j];
+ for (j = 0; j < state->playback.channels; j++) {
+ capture_chan = state->channel_mapping[j];
igt_assert(capture_chan >= 0);
igt_debug("Processing channel %zu (captured as "
"channel %d)\n", j, capture_chan);
audio_extract_channel_s32_le(channel, channel_len,
buf, buf_len,
- capture_channels,
+ state->capture.channels,
capture_chan);
- if (audio_signal_detect(signal, capture_rate, j,
+ if (audio_signal_detect(state->signal,
+ state->capture.rate, j,
channel, channel_len))
streak++;
else
@@ -940,55 +1136,220 @@ do_test_display_audio(data_t *data, struct chamelium_port *port,
}
buf_len = 0;
- i++;
- success = streak == MIN_STREAK * playback_channels;
+ success = streak == MIN_STREAK * state->playback.channels;
}
- igt_debug("Stopping audio playback\n");
- state.run = false;
- ret = pthread_join(thread, NULL);
- igt_assert(ret == 0);
-
- alsa_close_output(alsa);
-
- if (dump_fd >= 0) {
- close(dump_fd);
- if (success) {
- /* Test succeeded, no need to keep the captured data */
- unlink(dump_path);
- } else
- igt_debug("Saved captured audio data to %s\n", dump_path);
- free(dump_path);
- }
+ audio_state_stop(state, success);
free(recv);
free(buf);
free(channel);
+ audio_signal_fini(state->signal);
- ok = chamelium_stream_stop_realtime_audio(stream);
- igt_assert(ok);
+ return success;
+}
- audio_file = chamelium_stop_capturing_audio(data->chamelium,
- port);
- if (audio_file) {
- igt_debug("Audio file saved on the Chamelium in %s\n",
- audio_file->path);
- chamelium_destroy_audio_file(audio_file);
+static int audio_output_flatline_callback(void *data, void *buffer,
+ int samples)
+{
+ struct audio_state *state = data;
+ double *tmp;
+ size_t len, i;
+
+ len = samples * state->playback.channels;
+ tmp = malloc(len * sizeof(double));
+ for (i = 0; i < len; i++)
+ tmp[i] = (state->positive ? 1 : -1) * FLATLINE_AMPLITUDE;
+ audio_convert_to(buffer, tmp, len, state->playback.format);
+ free(tmp);
+
+ return state->run ? 0 : -1;
+}
+
+static bool detect_flatline_amplitude(double *buf, size_t buf_len, bool pos)
+{
+ double expected, min, max;
+ size_t i;
+ bool ok;
+
+ min = max = NAN;
+ for (i = 0; i < buf_len; i++) {
+ if (isnan(min) || buf[i] < min)
+ min = buf[i];
+ if (isnan(max) || buf[i] > max)
+ max = buf[i];
}
- audio_signal_fini(signal);
- chamelium_stream_deinit(stream);
+ expected = (pos ? 1 : -1) * FLATLINE_AMPLITUDE;
+ ok = (min >= expected - FLATLINE_AMPLITUDE_ACCURACY &&
+ max <= expected + FLATLINE_AMPLITUDE_ACCURACY);
+ if (ok)
+ igt_debug("Flatline wave amplitude detected\n");
+ else
+ igt_debug("Flatline amplitude not detected (min=%f, max=%f)\n",
+ min, max);
+ return ok;
+}
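/*
 * Worked example (illustrative, not from the patch): for the positive
 * flatline, expected = +FLATLINE_AMPLITUDE = 0.1, so every captured
 * sample must fall within [0.099, 0.101]; a single outlier such as
 * 0.12 pushes max above the bound and the detection fails.
 */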
- igt_assert(success);
+static ssize_t detect_falling_edge(double *buf, size_t buf_len)
+{
+ size_t i;
+
+ for (i = 0; i < buf_len; i++) {
+ if (buf[i] < 0)
+ return i;
+ }
+
+ return -1;
+}
+
+/** test_audio_flatline:
+ *
+ * Send a constant value (one positive, then a negative one) and check that:
+ *
+ * - The amplitude of the flatline is correct
+ * - All channels switch from a positive signal to a negative one at the same
+ * time (i.e. all channels are aligned)
+ */
+static bool test_audio_flatline(struct audio_state *state)
+{
+ bool success, amp_success, align_success;
+ int32_t *recv;
+ size_t recv_len, i, channel_len;
+ ssize_t j;
+ int streak, capture_chan;
+ double *channel;
+ int falling_edges[CHAMELIUM_MAX_AUDIO_CHANNELS];
+
+ alsa_register_output_callback(state->alsa,
+ audio_output_flatline_callback, state,
+ PLAYBACK_SAMPLES);
+
+ /* Start by sending a positive signal */
+ state->positive = true;
+
+ audio_state_start(state, "flatline");
+
+ for (i = 0; i < state->playback.channels; i++)
+ falling_edges[i] = -1;
+
+ recv = NULL;
+ recv_len = 0;
+ amp_success = false;
+ streak = 0;
+ while (!amp_success && state->msec < AUDIO_TIMEOUT) {
+ audio_state_receive(state, &recv, &recv_len);
+
+ igt_debug("Detecting audio signal, t=%d msec\n", state->msec);
+
+ for (i = 0; i < state->playback.channels; i++) {
+ capture_chan = state->channel_mapping[i];
+ igt_assert(capture_chan >= 0);
+ igt_debug("Processing channel %zu (captured as "
+ "channel %d)\n", i, capture_chan);
+
+ channel_len = audio_extract_channel_s32_le(NULL, 0,
+ recv, recv_len,
+ state->capture.channels,
+ capture_chan);
+ channel = malloc(channel_len * sizeof(double));
+ audio_extract_channel_s32_le(channel, channel_len,
+ recv, recv_len,
+ state->capture.channels,
+ capture_chan);
+
+ /* Check whether the amplitude is fine */
+ if (detect_flatline_amplitude(channel, channel_len,
+ state->positive))
+ streak++;
+ else
+ streak = 0;
+
+ /* If we're now sending a negative signal, detect the
+ * falling edge */
+ j = detect_falling_edge(channel, channel_len);
+ if (!state->positive && j >= 0) {
+ falling_edges[i] = recv_len * state->recv_pages
+ + j;
+ }
+
+ free(channel);
+ }
+
+ amp_success = streak == MIN_STREAK * state->playback.channels;
+
+ if (amp_success && state->positive) {
+ /* Switch to a negative signal after we've detected the
+ * positive one. */
+ state->positive = false;
+ amp_success = false;
+ streak = 0;
+ igt_debug("Switching to negative square wave\n");
+ }
+ }
+
+ /* Check alignment between all channels by comparing the index of the
+ * falling edge. */
+ align_success = true;
+ for (i = 0; i < state->playback.channels; i++) {
+ if (falling_edges[i] < 0) {
+ igt_debug("Falling edge not detected for channel %zu\n",
+ i);
+ align_success = false;
+ continue;
+ }
+
+ if (abs(falling_edges[0] - falling_edges[i]) >
+ FLATLINE_ALIGN_ACCURACY) {
+ igt_debug("Channel alignment mismatch: "
+ "channel 0 has a falling edge at index %d "
+ "while channel %zu has index %d\n",
+ falling_edges[0], i, falling_edges[i]);
+ align_success = false;
+ }
+ }
+
+ success = amp_success && align_success;
+ audio_state_stop(state, success);
+
+ free(recv);
+
+ return success;
+}
+
+static bool check_audio_configuration(struct alsa *alsa, snd_pcm_format_t format,
+ int channels, int sampling_rate)
+{
+ if (!alsa_test_output_configuration(alsa, format, channels,
+ sampling_rate)) {
+ igt_debug("Skipping test with format %s, sampling rate %d Hz "
+ "and %d channels because at least one of the "
+ "selected output devices doesn't support this "
+ "configuration\n",
+ snd_pcm_format_name(format),
+ sampling_rate, channels);
+ return false;
+ }
+ /* TODO: the Chamelium device sends a malformed signal for some audio
+ * configurations. See crbug.com/950917 */
+ if ((format != SND_PCM_FORMAT_S16_LE && sampling_rate >= 44100) ||
+ channels > 2) {
+ igt_debug("Skipping test with format %s, sampling rate %d Hz "
+ "and %d channels because the Chamelium device "
+ "doesn't support this configuration\n",
+ snd_pcm_format_name(format),
+ sampling_rate, channels);
+ return false;
+ }
return true;
}
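/*
 * Worked example for the checks above (illustrative, not from the
 * patch): S24_LE at 48000 Hz is skipped because of the Chamelium
 * workaround (non-S16 format at >= 44100 Hz), while S16_LE at 48000 Hz
 * with 2 channels is allowed, provided the selected ALSA outputs
 * support that configuration.
 */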
static void
test_display_audio(data_t *data, struct chamelium_port *port,
- const char *audio_device)
+ const char *audio_device, enum test_edid edid)
{
- bool run = false;
+ bool run, success;
struct alsa *alsa;
int ret;
igt_output_t *output;
@@ -996,19 +1357,23 @@ test_display_audio(data_t *data, struct chamelium_port *port,
struct igt_fb fb;
drmModeModeInfo *mode;
drmModeConnector *connector;
- int fb_id, i;
+ int fb_id, i, j;
+ int channels, sampling_rate;
+ snd_pcm_format_t format;
+ struct audio_state state;
igt_require(alsa_has_exclusive_access());
+ /* Old Chamelium devices need an update for DisplayPort audio and
+ * chamelium_get_audio_format support. */
+ igt_require(chamelium_has_audio_support(data->chamelium, port));
+
alsa = alsa_init();
igt_assert(alsa);
reset_state(data, port);
- /* Use the default Chamelium EDID for this test, as the base IGT EDID
- * doesn't advertise audio support (see drm_detect_monitor_audio in
- * the kernel tree). */
- output = prepare_output(data, port, false);
+ output = prepare_output(data, port, edid);
connector = chamelium_port_get_connector(data->chamelium, port, false);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_assert(primary);
@@ -1027,20 +1392,40 @@ test_display_audio(data_t *data, struct chamelium_port *port,
enable_output(data, port, output, mode, &fb);
- for (i = 0; i < sampling_rates_count; i++) {
- ret = alsa_open_output(alsa, audio_device);
- igt_assert(ret >= 0);
-
- /* TODO: playback on all 8 available channels */
- run |= do_test_display_audio(data, port, alsa,
- PLAYBACK_CHANNELS,
- sampling_rates[i]);
-
- alsa_close_output(alsa);
+ run = false;
+ success = true;
+ for (i = 0; i < test_sampling_rates_count; i++) {
+ for (j = 0; j < test_formats_count; j++) {
+ ret = alsa_open_output(alsa, audio_device);
+ igt_assert_f(ret >= 0, "Failed to open ALSA output\n");
+
+ /* TODO: playback on all 8 available channels (this
+ * isn't supported by Chamelium devices yet, see
+ * https://crbug.com/950917) */
+ format = test_formats[j];
+ channels = PLAYBACK_CHANNELS;
+ sampling_rate = test_sampling_rates[i];
+
+ if (!check_audio_configuration(alsa, format, channels,
+ sampling_rate))
+ continue;
+
+ run = true;
+
+ audio_state_init(&state, data, alsa, port,
+ format, channels, sampling_rate);
+ success &= test_audio_frequencies(&state);
+ success &= test_audio_flatline(&state);
+ audio_state_fini(&state);
+
+ alsa_close_output(alsa);
+ }
}
- /* Make sure we tested at least one frequency. */
+ /* Make sure we tested at least one format and sampling rate. */
igt_assert(run);
+ /* Make sure all runs were successful. */
+ igt_assert(success);
igt_remove_fb(data->drm_fd, &fb);
@@ -1049,18 +1434,97 @@ test_display_audio(data_t *data, struct chamelium_port *port,
free(alsa);
}
+static void
+test_display_audio_edid(data_t *data, struct chamelium_port *port,
+ enum test_edid edid)
+{
+ igt_output_t *output;
+ igt_plane_t *primary;
+ struct igt_fb fb;
+ drmModeModeInfo *mode;
+ drmModeConnector *connector;
+ int fb_id;
+ struct eld_entry eld;
+ struct eld_sad *sad;
-static void select_tiled_modifier(igt_plane_t *plane, uint32_t width,
+ reset_state(data, port);
+
+ output = prepare_output(data, port, edid);
+ connector = chamelium_port_get_connector(data->chamelium, port, false);
+ primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
+ igt_assert(primary);
+
+ /* Enable the output because audio cannot be played on inactive
+ * connectors. */
+ igt_assert(connector->count_modes > 0);
+ mode = &connector->modes[0];
+
+ fb_id = igt_create_color_pattern_fb(data->drm_fd,
+ mode->hdisplay, mode->vdisplay,
+ DRM_FORMAT_XRGB8888,
+ LOCAL_DRM_FORMAT_MOD_NONE,
+ 0, 0, 0, &fb);
+ igt_assert(fb_id > 0);
+
+ enable_output(data, port, output, mode, &fb);
+
+ igt_assert(eld_get_igt(&eld));
+ igt_assert(eld.sads_len == 1);
+
+ sad = &eld.sads[0];
+ igt_assert(sad->coding_type == CEA_SAD_FORMAT_PCM);
+ igt_assert(sad->channels == 2);
+ igt_assert(sad->rates == (CEA_SAD_SAMPLING_RATE_32KHZ |
+ CEA_SAD_SAMPLING_RATE_44KHZ | CEA_SAD_SAMPLING_RATE_48KHZ));
+ igt_assert(sad->bits == (CEA_SAD_SAMPLE_SIZE_16 |
+ CEA_SAD_SAMPLE_SIZE_20 | CEA_SAD_SAMPLE_SIZE_24));
+
+ igt_remove_fb(data->drm_fd, &fb);
+
+ drmModeFreeConnector(connector);
+}
+
+static void randomize_plane_stride(data_t *data,
+ uint32_t width, uint32_t height,
+ uint32_t format, uint64_t modifier,
+ size_t *stride)
+{
+ size_t stride_min;
+ uint32_t max_tile_w = 4, tile_w, tile_h;
+ int i;
+ struct igt_fb dummy;
+
+ stride_min = width * igt_format_plane_bpp(format, 0) / 8;
+
+ /* Randomize the stride to less than twice the minimum. */
+ *stride = (rand() % stride_min) + stride_min;
+
+ /*
+ * Create a dummy FB to determine bpp for each plane, and calculate
+ * the maximum tile width from that.
+ */
+ igt_create_fb(data->drm_fd, 64, 64, format, modifier, &dummy);
+ for (i = 0; i < dummy.num_planes; i++) {
+ igt_get_fb_tile_size(data->drm_fd, modifier, dummy.plane_bpp[i], &tile_w, &tile_h);
+
+ if (tile_w > max_tile_w)
+ max_tile_w = tile_w;
+ }
+ igt_remove_fb(data->drm_fd, &dummy);
+
+ /*
+ * Pixman requires the stride to be aligned to 32 bits, which is
+ * reflected in the initial value of max_tile_w; the hw may also
+ * require a multiple of the tile width, so use the bigger of the two.
+ */
+ *stride = ALIGN(*stride, max_tile_w);
+}
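/*
 * Worked example (illustrative, not from the patch): a 100 pixel wide
 * XRGB8888 framebuffer has stride_min = 100 * 32 / 8 = 400 bytes, so
 * the randomized stride lands in [400, 799]. Assuming max_tile_w stays
 * at 4, ALIGN(517, 4) then rounds a stride of 517 up to 520 bytes.
 */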
+
+static void update_tiled_modifier(igt_plane_t *plane, uint32_t width,
uint32_t height, uint32_t format,
uint64_t *modifier)
{
- if (igt_plane_has_format_mod(plane, format,
- DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)) {
- igt_debug("Selecting VC4 T-tiling\n");
-
- *modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
- } else if (igt_plane_has_format_mod(plane, format,
- DRM_FORMAT_MOD_BROADCOM_SAND256)) {
+ if (*modifier == DRM_FORMAT_MOD_BROADCOM_SAND256) {
/* Randomize the column height to less than twice the minimum. */
size_t column_height = (rand() % height) + height;
@@ -1068,90 +1532,84 @@ static void select_tiled_modifier(igt_plane_t *plane, uint32_t width,
column_height);
*modifier = DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(column_height);
- } else {
- *modifier = DRM_FORMAT_MOD_LINEAR;
}
}
-static void randomize_plane_format_stride(igt_plane_t *plane,
- uint32_t width, uint32_t height,
- uint32_t *format, uint64_t *modifier,
- size_t *stride, bool allow_yuv)
+static void randomize_plane_setup(data_t *data, igt_plane_t *plane,
+ drmModeModeInfo *mode,
+ uint32_t *width, uint32_t *height,
+ uint32_t *format, uint64_t *modifier,
+ bool allow_yuv)
{
- size_t stride_min;
- uint32_t *formats_array;
- unsigned int formats_count;
+ int min_dim;
+ uint32_t idx[plane->format_mod_count];
unsigned int count = 0;
unsigned int i;
- bool tiled;
- int index;
-
- igt_format_array_fill(&formats_array, &formats_count, allow_yuv);
/* First pass to count the supported formats. */
- for (i = 0; i < formats_count; i++)
- if (igt_plane_has_format_mod(plane, formats_array[i],
- DRM_FORMAT_MOD_LINEAR))
- count++;
+ for (i = 0; i < plane->format_mod_count; i++)
+ if (igt_fb_supported_format(plane->formats[i]) &&
+ (allow_yuv || !igt_format_is_yuv(plane->formats[i])))
+ idx[count++] = i;
igt_assert(count > 0);
- index = rand() % count;
+ i = idx[rand() % count];
+ *format = plane->formats[i];
+ *modifier = plane->modifiers[i];
- /* Second pass to get the index-th supported format. */
- for (i = 0; i < formats_count; i++) {
- if (!igt_plane_has_format_mod(plane, formats_array[i],
- DRM_FORMAT_MOD_LINEAR))
- continue;
+ update_tiled_modifier(plane, *width, *height, *format, modifier);
- if (!index--) {
- *format = formats_array[i];
- break;
- }
- }
-
- free(formats_array);
-
- igt_assert(index < 0);
-
- stride_min = width * igt_format_plane_bpp(*format, 0) / 8;
+ /*
+ * Randomize width and height in the mode dimensions range.
+ *
+ * Restrict to a minimum of 2 * min_dim; this way src_w/h are always
+ * at least min_dim, because src_w = width - rand() % (width / 2).
+ *
+ * Use a minimum dimension of 16 for YUV, because planar YUV
+ * subsamples the UV plane.
+ */
+ min_dim = igt_format_is_yuv(*format) ? 16 : 8;
- /* Randomize the stride to less than twice the minimum. */
- *stride = (rand() % stride_min) + stride_min;
+ *width = max((rand() % mode->hdisplay) + 1, 2 * min_dim);
+ *height = max((rand() % mode->vdisplay) + 1, 2 * min_dim);
+}
- /* Pixman requires the stride to be aligned to 32-byte words. */
- *stride = ALIGN(*stride, sizeof(uint32_t));
+static void configure_plane(igt_plane_t *plane, uint32_t src_w, uint32_t src_h,
+ uint32_t src_x, uint32_t src_y, uint32_t crtc_w,
+ uint32_t crtc_h, int32_t crtc_x, int32_t crtc_y,
+ struct igt_fb *fb)
+{
+ igt_plane_set_fb(plane, fb);
- /* Randomize the use of a tiled mode with a 1/4 probability. */
- tiled = ((rand() % 4) == 0);
+ igt_plane_set_position(plane, crtc_x, crtc_y);
+ igt_plane_set_size(plane, crtc_w, crtc_h);
- if (tiled)
- select_tiled_modifier(plane, width, height, *format, modifier);
- else
- *modifier = DRM_FORMAT_MOD_LINEAR;
+ igt_fb_set_position(fb, plane, src_x, src_y);
+ igt_fb_set_size(fb, plane, src_w, src_h);
}
-static void randomize_plane_dimensions(drmModeModeInfo *mode,
- uint32_t *width, uint32_t *height,
- uint32_t *src_w, uint32_t *src_h,
- uint32_t *src_x, uint32_t *src_y,
- uint32_t *crtc_w, uint32_t *crtc_h,
- int32_t *crtc_x, int32_t *crtc_y,
- bool allow_scaling)
+static void randomize_plane_coordinates(data_t *data, igt_plane_t *plane,
+ drmModeModeInfo *mode,
+ struct igt_fb *fb,
+ uint32_t *src_w, uint32_t *src_h,
+ uint32_t *src_x, uint32_t *src_y,
+ uint32_t *crtc_w, uint32_t *crtc_h,
+ int32_t *crtc_x, int32_t *crtc_y,
+ bool allow_scaling)
{
+ bool is_yuv = igt_format_is_yuv(fb->drm_format);
+ uint32_t width = fb->width, height = fb->height;
double ratio;
-
- /* Randomize width and height in the mode dimensions range. */
- *width = (rand() % mode->hdisplay) + 1;
- *height = (rand() % mode->vdisplay) + 1;
+ int ret;
/* Randomize source offset in the first half of the original size. */
- *src_x = rand() % (*width / 2);
- *src_y = rand() % (*height / 2);
+ *src_x = rand() % (width / 2);
+ *src_y = rand() % (height / 2);
/* The source size only includes the active source area. */
- *src_w = *width - *src_x;
- *src_h = *height - *src_y;
+ *src_w = width - *src_x;
+ *src_h = height - *src_y;
if (allow_scaling) {
*crtc_w = (rand() % mode->hdisplay) + 1;
@@ -1161,17 +1619,22 @@ static void randomize_plane_dimensions(drmModeModeInfo *mode,
* Don't bother with scaling if dimensions are quite close in
* order to get non-scaling cases more frequently. Also limit
 * scaling to 3x to avoid aggressive filtering that makes
- * comparison less reliable.
+ * comparison less reliable, and don't go above 2x downsampling
+ * to avoid possible hw limitations.
*/
ratio = ((double) *crtc_w / *src_w);
- if (ratio > 0.8 && ratio < 1.2)
+ if (ratio < 0.5)
+ *src_w = *crtc_w * 2;
+ else if (ratio > 0.8 && ratio < 1.2)
*crtc_w = *src_w;
else if (ratio > 3.0)
*crtc_w = *src_w * 3;
ratio = ((double) *crtc_h / *src_h);
- if (ratio > 0.8 && ratio < 1.2)
+ if (ratio < 0.5)
+ *src_h = *crtc_h * 2;
+ else if (ratio > 0.8 && ratio < 1.2)
*crtc_h = *src_h;
else if (ratio > 3.0)
*crtc_h = *src_h * 3;
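/*
 * Worked example for the clamping above (illustrative, not from the
 * patch): with src_w = 1000, a randomized crtc_w of 300 (ratio 0.3)
 * shrinks src_w to 600 for a 2x downscale; a crtc_w of 900 (ratio 0.9)
 * disables scaling by setting crtc_w = src_w; a crtc_w of 3500 (ratio
 * 3.5) is capped to 3x upscaling, i.e. crtc_w = 3000.
 */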
@@ -1186,8 +1649,15 @@ static void randomize_plane_dimensions(drmModeModeInfo *mode,
 * scaled clipping may result in decimal dimensions, which most
 * drivers don't support.
*/
- *crtc_x = rand() % (mode->hdisplay - *crtc_w);
- *crtc_y = rand() % (mode->vdisplay - *crtc_h);
+ if (*crtc_w < mode->hdisplay)
+ *crtc_x = rand() % (mode->hdisplay - *crtc_w);
+ else
+ *crtc_x = 0;
+
+ if (*crtc_h < mode->vdisplay)
+ *crtc_y = rand() % (mode->vdisplay - *crtc_h);
+ else
+ *crtc_y = 0;
} else {
/*
* Randomize the on-crtc position and allow the plane to go
@@ -1196,6 +1666,62 @@ static void randomize_plane_dimensions(drmModeModeInfo *mode,
*crtc_x = (rand() % mode->hdisplay) - *crtc_w / 2;
*crtc_y = (rand() % mode->vdisplay) - *crtc_h / 2;
}
+
+ configure_plane(plane, *src_w, *src_h, *src_x, *src_y,
+ *crtc_w, *crtc_h, *crtc_x, *crtc_y, fb);
+ ret = igt_display_try_commit_atomic(&data->display,
+ DRM_MODE_ATOMIC_TEST_ONLY |
+ DRM_MODE_ATOMIC_ALLOW_MODESET,
+ NULL);
+ if (!ret)
+ return;
+
+ /* Coordinates are logged in the dumped debug log, so only report w/h on failure here. */
+ igt_assert_f(ret != -ENOSPC, "Failure in testcase, invalid coordinates on a %ux%u fb\n", width, height);
+
+ /* Make YUV coordinates a multiple of 2 and retry the math. */
+ if (is_yuv) {
+ *src_x &= ~1;
+ *src_y &= ~1;
+ *src_w &= ~1;
+ *src_h &= ~1;
+ /* To handle 1:1 scaling, clear crtc_w/h too. */
+ *crtc_w &= ~1;
+ *crtc_h &= ~1;
+
+ if (*crtc_x < 0 && (*crtc_x & 1))
+ (*crtc_x)++;
+ else
+ *crtc_x &= ~1;
+
+ /* If negative, round up to 0 instead of down */
+ if (*crtc_y < 0 && (*crtc_y & 1))
+ (*crtc_y)++;
+ else
+ *crtc_y &= ~1;
+
+ configure_plane(plane, *src_w, *src_h, *src_x, *src_y, *crtc_w,
+ *crtc_h, *crtc_x, *crtc_y, fb);
+ ret = igt_display_try_commit_atomic(&data->display,
+ DRM_MODE_ATOMIC_TEST_ONLY |
+ DRM_MODE_ATOMIC_ALLOW_MODESET,
+ NULL);
+ if (!ret)
+ return;
+ }
+
+ igt_assert(!ret || allow_scaling);
+ igt_info("Scaling ratio %g / %g failed, trying without scaling.\n",
+ ((double) *crtc_w / *src_w), ((double) *crtc_h / *src_h));
+
+ *crtc_w = *src_w;
+ *crtc_h = *src_h;
+
+ configure_plane(plane, *src_w, *src_h, *src_x, *src_y, *crtc_w,
+ *crtc_h, *crtc_x, *crtc_y, fb);
+ igt_display_commit_atomic(&data->display,
+ DRM_MODE_ATOMIC_TEST_ONLY |
+ DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
}
static void blit_plane_cairo(data_t *data, cairo_surface_t *result,
@@ -1254,20 +1780,6 @@ static void blit_plane_cairo(data_t *data, cairo_surface_t *result,
cairo_destroy(cr);
}
-static void configure_plane(igt_plane_t *plane, uint32_t src_w, uint32_t src_h,
- uint32_t src_x, uint32_t src_y, uint32_t crtc_w,
- uint32_t crtc_h, int32_t crtc_x, int32_t crtc_y,
- struct igt_fb *fb)
-{
- igt_plane_set_fb(plane, fb);
-
- igt_plane_set_position(plane, crtc_x, crtc_y);
- igt_plane_set_size(plane, crtc_w, crtc_h);
-
- igt_fb_set_position(fb, plane, src_x, src_y);
- igt_fb_set_size(fb, plane, src_w, src_h);
-}
-
static void prepare_randomized_plane(data_t *data,
drmModeModeInfo *mode,
igt_plane_t *plane,
@@ -1288,51 +1800,49 @@ static void prepare_randomized_plane(data_t *data,
bool tiled;
int fb_id;
- randomize_plane_dimensions(mode, &overlay_fb_w, &overlay_fb_h,
- &overlay_src_w, &overlay_src_h,
- &overlay_src_x, &overlay_src_y,
- &overlay_crtc_w, &overlay_crtc_h,
- &overlay_crtc_x, &overlay_crtc_y,
- allow_scaling);
+ randomize_plane_setup(data, plane, mode, &overlay_fb_w, &overlay_fb_h,
+ &format, &modifier, allow_yuv);
- igt_debug("Plane %d: framebuffer size %dx%d\n", index,
- overlay_fb_w, overlay_fb_h);
- igt_debug("Plane %d: on-crtc size %dx%d\n", index,
- overlay_crtc_w, overlay_crtc_h);
- igt_debug("Plane %d: on-crtc position %dx%d\n", index,
- overlay_crtc_x, overlay_crtc_y);
- igt_debug("Plane %d: in-framebuffer size %dx%d\n", index,
- overlay_src_w, overlay_src_h);
- igt_debug("Plane %d: in-framebuffer position %dx%d\n", index,
- overlay_src_x, overlay_src_y);
+ tiled = (modifier != LOCAL_DRM_FORMAT_MOD_NONE);
+ igt_debug("Plane %d: framebuffer size %dx%d %s format (%s)\n",
+ index, overlay_fb_w, overlay_fb_h,
+ igt_format_str(format), tiled ? "tiled" : "linear");
/* Get a pattern framebuffer for the overlay plane. */
fb_id = chamelium_get_pattern_fb(data, overlay_fb_w, overlay_fb_h,
DRM_FORMAT_XRGB8888, 32, &pattern_fb);
igt_assert(fb_id > 0);
- randomize_plane_format_stride(plane, overlay_fb_w, overlay_fb_h,
- &format, &modifier, &stride, allow_yuv);
+ randomize_plane_stride(data, overlay_fb_w, overlay_fb_h,
+ format, modifier, &stride);
- tiled = (modifier != LOCAL_DRM_FORMAT_MOD_NONE);
-
- igt_debug("Plane %d: %s format (%s) with stride %ld\n", index,
- igt_format_str(format), tiled ? "tiled" : "linear", stride);
+ igt_debug("Plane %d: stride %ld\n", index, stride);
fb_id = igt_fb_convert_with_stride(overlay_fb, &pattern_fb, format,
modifier, stride);
igt_assert(fb_id > 0);
+ randomize_plane_coordinates(data, plane, mode, overlay_fb,
+ &overlay_src_w, &overlay_src_h,
+ &overlay_src_x, &overlay_src_y,
+ &overlay_crtc_w, &overlay_crtc_h,
+ &overlay_crtc_x, &overlay_crtc_y,
+ allow_scaling);
+
+ igt_debug("Plane %d: in-framebuffer size %dx%d\n", index,
+ overlay_src_w, overlay_src_h);
+ igt_debug("Plane %d: in-framebuffer position %dx%d\n", index,
+ overlay_src_x, overlay_src_y);
+ igt_debug("Plane %d: on-crtc size %dx%d\n", index,
+ overlay_crtc_w, overlay_crtc_h);
+ igt_debug("Plane %d: on-crtc position %dx%d\n", index,
+ overlay_crtc_x, overlay_crtc_y);
+
blit_plane_cairo(data, result_surface, overlay_src_w, overlay_src_h,
overlay_src_x, overlay_src_y,
overlay_crtc_w, overlay_crtc_h,
overlay_crtc_x, overlay_crtc_y, &pattern_fb);
- configure_plane(plane, overlay_src_w, overlay_src_h,
- overlay_src_x, overlay_src_y,
- overlay_crtc_w, overlay_crtc_h,
- overlay_crtc_x, overlay_crtc_y, overlay_fb);
-
/* Remove the original pattern framebuffer. */
igt_remove_fb(data->drm_fd, &pattern_fb);
}
@@ -1377,7 +1887,7 @@ static void test_display_planes_random(data_t *data,
reset_state(data, port);
/* Find the connector and pipe. */
- output = prepare_output(data, port, true);
+ output = prepare_output(data, port, TEST_EDID_BASE);
mode = igt_output_get_mode(output);
@@ -1408,7 +1918,7 @@ static void test_display_planes_random(data_t *data,
igt_output_count_plane_type(output, DRM_PLANE_TYPE_OVERLAY);
/* Limit the number of planes to a reasonable scene. */
- overlay_planes_max = max(overlay_planes_max, 4);
+ overlay_planes_max = min(overlay_planes_max, 4);
overlay_planes_count = (rand() % overlay_planes_max) + 1;
igt_debug("Using %d overlay planes\n", overlay_planes_count);
@@ -1461,17 +1971,8 @@ static void test_display_planes_random(data_t *data,
chamelium_destroy_frame_dump(dump);
}
- for (i = 0; i < overlay_planes_count; i++) {
- struct igt_fb *overlay_fb = &overlay_fbs[i];
- igt_plane_t *plane;
-
- plane = igt_output_get_plane_type_index(output,
- DRM_PLANE_TYPE_OVERLAY,
- i);
- igt_assert(plane);
-
- igt_remove_fb(data->drm_fd, overlay_fb);
- }
+ for (i = 0; i < overlay_planes_count; i++)
+ igt_remove_fb(data->drm_fd, &overlay_fbs[i]);
free(overlay_fbs);
@@ -1541,6 +2042,21 @@ test_hpd_storm_disable(data_t *data, struct chamelium_port *port, int width)
igt_hpd_storm_reset(data->drm_fd);
}
+static const unsigned char *get_edid(enum test_edid edid)
+{
+ switch (edid) {
+ case TEST_EDID_BASE:
+ return igt_kms_get_base_edid();
+ case TEST_EDID_ALT:
+ return igt_kms_get_alt_edid();
+ case TEST_EDID_HDMI_AUDIO:
+ return igt_kms_get_hdmi_audio_edid();
+ case TEST_EDID_DP_AUDIO:
+ return igt_kms_get_dp_audio_edid();
+ }
+ assert(0); /* unreachable */
+}
+
#define for_each_port(p, port) \
for (p = 0, port = data.ports[p]; \
p < data.port_count; \
@@ -1557,7 +2073,8 @@ static data_t data;
igt_main
{
struct chamelium_port *port;
- int edid_id, alt_edid_id, p;
+ int p;
+ size_t i;
igt_fixture {
igt_skip_on_simulation();
@@ -1569,12 +2086,10 @@ igt_main
data.ports = chamelium_get_ports(data.chamelium,
&data.port_count);
- edid_id = chamelium_new_edid(data.chamelium,
- igt_kms_get_base_edid());
- alt_edid_id = chamelium_new_edid(data.chamelium,
- igt_kms_get_alt_edid());
- data.edid_id = edid_id;
- data.alt_edid_id = alt_edid_id;
+ for (i = 0; i < TEST_EDID_COUNT; ++i) {
+ data.edids[i] = chamelium_new_edid(data.chamelium,
+ get_edid(i));
+ }
/* So fbcon doesn't try to reprobe things itself */
kmstest_set_vt_graphics_mode();
@@ -1598,10 +2113,8 @@ igt_main
HPD_TOGGLE_COUNT_FAST);
connector_subtest("dp-edid-read", DisplayPort) {
- test_edid_read(&data, port, edid_id,
- igt_kms_get_base_edid());
- test_edid_read(&data, port, alt_edid_id,
- igt_kms_get_alt_edid());
+ test_edid_read(&data, port, TEST_EDID_BASE);
+ test_edid_read(&data, port, TEST_EDID_ALT);
}
connector_subtest("dp-hpd-after-suspend", DisplayPort)
@@ -1626,13 +2139,15 @@ igt_main
test_suspend_resume_edid_change(&data, port,
SUSPEND_STATE_MEM,
SUSPEND_TEST_NONE,
- edid_id, alt_edid_id);
+ TEST_EDID_BASE,
+ TEST_EDID_ALT);
connector_subtest("dp-edid-change-during-hibernate", DisplayPort)
test_suspend_resume_edid_change(&data, port,
SUSPEND_STATE_DISK,
SUSPEND_TEST_DEVICES,
- edid_id, alt_edid_id);
+ TEST_EDID_BASE,
+ TEST_EDID_ALT);
connector_subtest("dp-crc-single", DisplayPort)
test_display_all_modes(&data, port, DRM_FORMAT_XRGB8888,
@@ -1649,8 +2164,15 @@ igt_main
connector_subtest("dp-frame-dump", DisplayPort)
test_display_frame_dump(&data, port);
+ /* The EDID we generate advertises HDMI audio, not DP audio. Use a
+ * separate DP audio EDID for this test. */
connector_subtest("dp-audio", DisplayPort)
- test_display_audio(&data, port, "HDMI");
+ test_display_audio(&data, port, "HDMI",
+ TEST_EDID_DP_AUDIO);
+
+ connector_subtest("dp-audio-edid", DisplayPort)
+ test_display_audio_edid(&data, port,
+ TEST_EDID_DP_AUDIO);
}
igt_subtest_group {
@@ -1668,10 +2190,8 @@ igt_main
HPD_TOGGLE_COUNT_FAST);
connector_subtest("hdmi-edid-read", HDMIA) {
- test_edid_read(&data, port, edid_id,
- igt_kms_get_base_edid());
- test_edid_read(&data, port, alt_edid_id,
- igt_kms_get_alt_edid());
+ test_edid_read(&data, port, TEST_EDID_BASE);
+ test_edid_read(&data, port, TEST_EDID_ALT);
}
connector_subtest("hdmi-hpd-after-suspend", HDMIA)
@@ -1696,13 +2216,15 @@ igt_main
test_suspend_resume_edid_change(&data, port,
SUSPEND_STATE_MEM,
SUSPEND_TEST_NONE,
- edid_id, alt_edid_id);
+ TEST_EDID_BASE,
+ TEST_EDID_ALT);
connector_subtest("hdmi-edid-change-during-hibernate", HDMIA)
test_suspend_resume_edid_change(&data, port,
SUSPEND_STATE_DISK,
SUSPEND_TEST_DEVICES,
- edid_id, alt_edid_id);
+ TEST_EDID_BASE,
+ TEST_EDID_ALT);
connector_subtest("hdmi-crc-single", HDMIA)
test_display_all_modes(&data, port, DRM_FORMAT_XRGB8888,
@@ -1798,6 +2320,14 @@ igt_main
connector_subtest("hdmi-frame-dump", HDMIA)
test_display_frame_dump(&data, port);
+
+ connector_subtest("hdmi-audio", HDMIA)
+ test_display_audio(&data, port, "HDMI",
+ TEST_EDID_HDMI_AUDIO);
+
+ connector_subtest("hdmi-audio-edid", HDMIA)
+ test_display_audio_edid(&data, port,
+ TEST_EDID_HDMI_AUDIO);
}
igt_subtest_group {
@@ -1813,10 +2343,8 @@ igt_main
test_basic_hotplug(&data, port, HPD_TOGGLE_COUNT_FAST);
connector_subtest("vga-edid-read", VGA) {
- test_edid_read(&data, port, edid_id,
- igt_kms_get_base_edid());
- test_edid_read(&data, port, alt_edid_id,
- igt_kms_get_alt_edid());
+ test_edid_read(&data, port, TEST_EDID_BASE);
+ test_edid_read(&data, port, TEST_EDID_ALT);
}
connector_subtest("vga-hpd-after-suspend", VGA)
diff --git a/tests/kms_concurrent.c b/tests/kms_concurrent.c
index d82ca040..23b05ea1 100644
--- a/tests/kms_concurrent.c
+++ b/tests/kms_concurrent.c
@@ -372,30 +372,27 @@ static int opt_handler(int option, int option_index, void *input)
opt.seed = strtol(optarg, NULL, 0);
break;
default:
- igt_assert(false);
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
const char *help_str =
" --iterations Number of iterations for test coverage. -1 loop forever, default 1 iteration\n"
" --seed Seed for random number generator\n";
+struct option long_options[] = {
+ { "iterations", required_argument, NULL, 'i'},
+ { "seed", required_argument, NULL, 's'},
+ { 0, 0, 0, 0 }
+};
static data_t data;
-int main(int argc, char *argv[])
+igt_main_args("", long_options, help_str, opt_handler, NULL)
{
- struct option long_options[] = {
- { "iterations", required_argument, NULL, 'i'},
- { "seed", required_argument, NULL, 's'},
- { 0, 0, 0, 0 }
- };
enum pipe pipe;
- igt_subtest_init_parse_opts(&argc, argv, "", long_options, help_str,
- opt_handler, NULL);
-
igt_skip_on_simulation();
igt_fixture {
@@ -414,6 +411,4 @@ int main(int argc, char *argv[])
igt_display_fini(&data.display);
close(data.drm_fd);
}
-
- igt_exit();
}
diff --git a/tests/kms_cursor_crc.c b/tests/kms_cursor_crc.c
index fd74fda5..d0fb8f1d 100644
--- a/tests/kms_cursor_crc.c
+++ b/tests/kms_cursor_crc.c
@@ -89,20 +89,22 @@ static void draw_cursor(cairo_t *cr, int x, int y, int cw, int ch, double a)
static void cursor_enable(data_t *data)
{
igt_output_t *output = data->output;
- igt_plane_t *cursor;
+ igt_plane_t *cursor =
+ igt_output_get_plane_type(output, DRM_PLANE_TYPE_CURSOR);
- cursor = igt_output_get_plane_type(output, DRM_PLANE_TYPE_CURSOR);
igt_plane_set_fb(cursor, &data->fb);
igt_plane_set_size(cursor, data->curw, data->curh);
+ igt_fb_set_size(&data->fb, cursor, data->curw, data->curh);
}
static void cursor_disable(data_t *data)
{
igt_output_t *output = data->output;
- igt_plane_t *cursor;
+ igt_plane_t *cursor =
+ igt_output_get_plane_type(output, DRM_PLANE_TYPE_CURSOR);
- cursor = igt_output_get_plane_type(output, DRM_PLANE_TYPE_CURSOR);
igt_plane_set_fb(cursor, NULL);
+ igt_plane_set_position(cursor, 0, 0);
}
static bool chv_cursor_broken(data_t *data, int x)
@@ -146,7 +148,8 @@ static void do_single_test(data_t *data, int x, int y)
igt_display_t *display = &data->display;
igt_pipe_crc_t *pipe_crc = data->pipe_crc;
igt_crc_t crc, ref_crc;
- igt_plane_t *cursor;
+ igt_plane_t *cursor =
+ igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_CURSOR);
cairo_t *cr;
int ret = 0;
@@ -158,7 +161,6 @@ static void do_single_test(data_t *data, int x, int y)
igt_put_cairo_ctx(data->drm_fd, &data->primary_fb, cr);
cursor_enable(data);
- cursor = igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_CURSOR);
igt_plane_set_position(cursor, x, y);
if (chv_cursor_broken(data, x) && cursor_visible(data, x, y)) {
@@ -217,7 +219,8 @@ static void do_single_test(data_t *data, int x, int y)
static void do_fail_test(data_t *data, int x, int y, int expect)
{
igt_display_t *display = &data->display;
- igt_plane_t *cursor;
+ igt_plane_t *cursor =
+ igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_CURSOR);
cairo_t *cr;
int ret;
@@ -229,7 +232,6 @@ static void do_fail_test(data_t *data, int x, int y, int expect)
igt_put_cairo_ctx(data->drm_fd, &data->primary_fb, cr);
cursor_enable(data);
- cursor = igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_CURSOR);
igt_plane_set_position(cursor, x, y);
ret = igt_display_try_commit2(display, COMMIT_LEGACY);
@@ -337,6 +339,18 @@ static void test_crc_random(data_t *data)
}
}
+static void cleanup_crtc(data_t *data)
+{
+ igt_display_t *display = &data->display;
+
+ igt_pipe_crc_free(data->pipe_crc);
+ data->pipe_crc = NULL;
+
+ igt_remove_fb(data->drm_fd, &data->primary_fb);
+
+ igt_display_reset(display);
+}
+
static void prepare_crtc(data_t *data, igt_output_t *output,
int cursor_w, int cursor_h)
{
@@ -344,9 +358,10 @@ static void prepare_crtc(data_t *data, igt_output_t *output,
igt_display_t *display = &data->display;
igt_plane_t *primary;
+ cleanup_crtc(data);
+
/* select the pipe we want to use */
igt_output_set_pipe(output, data->pipe);
- cursor_disable(data);
/* create and set the primary plane fb */
mode = igt_output_get_mode(output);
@@ -362,9 +377,6 @@ static void prepare_crtc(data_t *data, igt_output_t *output,
igt_display_commit(display);
/* create the pipe_crc object for this pipe */
- if (data->pipe_crc)
- igt_pipe_crc_free(data->pipe_crc);
-
data->pipe_crc = igt_pipe_crc_new(data->drm_fd, data->pipe,
INTEL_PIPE_CRC_SOURCE_AUTO);
@@ -379,31 +391,10 @@ static void prepare_crtc(data_t *data, igt_output_t *output,
data->curh = cursor_h;
data->refresh = mode->vrefresh;
- /* make sure cursor is disabled */
- cursor_disable(data);
- igt_wait_for_vblank(data->drm_fd, data->pipe);
-
/* get reference crc w/o cursor */
igt_pipe_crc_collect_crc(data->pipe_crc, &data->ref_crc);
}
-static void cleanup_crtc(data_t *data, igt_output_t *output)
-{
- igt_display_t *display = &data->display;
- igt_plane_t *primary;
-
- igt_pipe_crc_free(data->pipe_crc);
- data->pipe_crc = NULL;
-
- igt_remove_fb(data->drm_fd, &data->primary_fb);
-
- primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
- igt_plane_set_fb(primary, NULL);
-
- igt_output_set_pipe(output, PIPE_ANY);
- igt_display_commit(display);
-}
-
static void test_cursor_alpha(data_t *data, double a)
{
igt_display_t *display = &data->display;
@@ -413,7 +404,6 @@ static void test_cursor_alpha(data_t *data, double a)
uint32_t fb_id;
int curw = data->curw;
int curh = data->curh;
- int ret;
/*alpha cursor fb*/
fb_id = igt_create_fb(data->drm_fd, curw, curh,
@@ -427,8 +417,7 @@ static void test_cursor_alpha(data_t *data, double a)
/*Hardware Test*/
cursor_enable(data);
- ret = drmModeSetCursor(data->drm_fd, data->output->config.crtc->crtc_id, data->fb.gem_handle, curw, curh);
- igt_assert_eq(ret, 0);
+ igt_display_commit(display);
igt_wait_for_vblank(data->drm_fd, data->pipe);
igt_pipe_crc_collect_crc(pipe_crc, &crc);
cursor_disable(data);
@@ -462,42 +451,10 @@ static void test_cursor_opaque(data_t *data)
test_cursor_alpha(data, 1.0);
}
-
static void run_test(data_t *data, void (*testfunc)(data_t *), int cursor_w, int cursor_h)
{
- igt_display_t *display = &data->display;
- igt_output_t *output;
- enum pipe p;
- int valid_tests = 0;
-
- igt_require(cursor_w <= data->cursor_max_w &&
- cursor_h <= data->cursor_max_h);
-
- for_each_pipe_with_valid_output(display, p, output) {
- data->output = output;
- data->pipe = p;
-
- prepare_crtc(data, output, cursor_w, cursor_h);
-
- valid_tests++;
-
- igt_info("Beginning %s on pipe %s, connector %s\n",
- igt_subtest_name(),
- kmstest_pipe_name(data->pipe),
- igt_output_name(output));
-
- testfunc(data);
-
- igt_info("\n%s on pipe %s, connector %s: PASSED\n\n",
- igt_subtest_name(),
- kmstest_pipe_name(data->pipe),
- igt_output_name(output));
-
- /* cleanup what prepare_crtc() has done */
- cleanup_crtc(data, output);
- }
-
- igt_require_f(valid_tests, "no valid crtc/connector combinations found\n");
+ prepare_crtc(data, data->output, cursor_w, cursor_h);
+ testfunc(data);
}
static void create_cursor_fb(data_t *data, int cur_w, int cur_h)
@@ -555,7 +512,8 @@ static void test_cursor_size(data_t *data)
uint32_t fb_id;
int i, size;
int cursor_max_size = data->cursor_max_w;
- int ret;
+ igt_plane_t *cursor =
+ igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_CURSOR);
/* Create a maximum size cursor, then change the size in flight to
* smaller ones to see that the size is applied correctly
@@ -572,17 +530,16 @@ static void test_cursor_size(data_t *data)
/* Hardware test loop */
cursor_enable(data);
- ret = drmModeMoveCursor(data->drm_fd, data->output->config.crtc->crtc_id, 0, 0);
- igt_assert_eq(ret, 0);
for (i = 0, size = cursor_max_size; size >= 64; size /= 2, i++) {
/* Change size in flight: */
- ret = drmModeSetCursor(data->drm_fd, data->output->config.crtc->crtc_id,
- data->fb.gem_handle, size, size);
- igt_assert_eq(ret, 0);
+ igt_plane_set_size(cursor, size, size);
+ igt_fb_set_size(&data->fb, cursor, size, size);
+ igt_display_commit(display);
igt_wait_for_vblank(data->drm_fd, data->pipe);
igt_pipe_crc_collect_crc(pipe_crc, &crc[i]);
}
cursor_disable(data);
+ igt_display_commit(display);
igt_remove_fb(data->drm_fd, &data->fb);
/* Software test loop */
for (i = 0, size = cursor_max_size; size >= 64; size /= 2, i++) {
@@ -608,20 +565,29 @@ static void test_rapid_movement(data_t *data)
struct timeval start, end, delta;
int x = 0, y = 0;
long usec;
- int crtc_id = data->output->config.crtc->crtc_id;
+ igt_display_t *display = &data->display;
+ igt_plane_t *cursor =
+ igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_CURSOR);
- igt_assert_eq(drmModeSetCursor(data->drm_fd, crtc_id,
- data->fb.gem_handle, data->curw, data->curh), 0);
+ cursor_enable(data);
gettimeofday(&start, NULL);
- for ( ; x < 100; x++)
- igt_assert_eq(drmModeMoveCursor(data->drm_fd, crtc_id, x, y), 0);
- for ( ; y < 100; y++)
- igt_assert_eq(drmModeMoveCursor(data->drm_fd, crtc_id, x, y), 0);
- for ( ; x > 0; x--)
- igt_assert_eq(drmModeMoveCursor(data->drm_fd, crtc_id, x, y), 0);
- for ( ; y > 0; y--)
- igt_assert_eq(drmModeMoveCursor(data->drm_fd, crtc_id, x, y), 0);
+ for ( ; x < 100; x++) {
+ igt_plane_set_position(cursor, x, y);
+ igt_display_commit(display);
+ }
+ for ( ; y < 100; y++) {
+ igt_plane_set_position(cursor, x, y);
+ igt_display_commit(display);
+ }
+ for ( ; x > 0; x--) {
+ igt_plane_set_position(cursor, x, y);
+ igt_display_commit(display);
+ }
+ for ( ; y > 0; y--) {
+ igt_plane_set_position(cursor, x, y);
+ igt_display_commit(display);
+ }
gettimeofday(&end, NULL);
/*
@@ -633,44 +599,68 @@ static void test_rapid_movement(data_t *data)
timersub(&end, &start, &delta);
usec = delta.tv_usec + 1000000 * delta.tv_sec;
igt_assert_lt(usec, 0.9 * 400 * 1000000 / data->refresh);
-
- igt_assert_eq(drmModeSetCursor(data->drm_fd, crtc_id,
- 0, data->curw, data->curh), 0);
-
}
-static void run_test_generic(data_t *data)
+static void run_tests_on_pipe(data_t *data, enum pipe pipe)
{
int cursor_size;
+
+ igt_fixture {
+ data->pipe = pipe;
+ data->output = igt_get_single_output_for_pipe(&data->display, pipe);
+ igt_require(data->output);
+ }
+
+ igt_subtest_f("pipe-%s-cursor-size-change", kmstest_pipe_name(pipe))
+ run_test(data, test_cursor_size,
+ data->cursor_max_w, data->cursor_max_h);
+
+ igt_subtest_f("pipe-%s-cursor-alpha-opaque", kmstest_pipe_name(pipe))
+ run_test(data, test_cursor_opaque, data->cursor_max_w, data->cursor_max_h);
+
+ igt_subtest_f("pipe-%s-cursor-alpha-transparent", kmstest_pipe_name(pipe))
+ run_test(data, test_cursor_transparent, data->cursor_max_w, data->cursor_max_h);
+
+ igt_fixture
+ create_cursor_fb(data, data->cursor_max_w, data->cursor_max_h);
+
+ igt_subtest_f("pipe-%s-cursor-dpms", kmstest_pipe_name(pipe)) {
+ data->flags = TEST_DPMS;
+ run_test(data, test_crc_random, data->cursor_max_w, data->cursor_max_h);
+ }
+ data->flags = 0;
+
+ igt_subtest_f("pipe-%s-cursor-suspend", kmstest_pipe_name(pipe)) {
+ data->flags = TEST_SUSPEND;
+ run_test(data, test_crc_random, data->cursor_max_w, data->cursor_max_h);
+ }
+ data->flags = 0;
+
+ igt_fixture
+ igt_remove_fb(data->drm_fd, &data->fb);
+
for (cursor_size = 64; cursor_size <= 512; cursor_size *= 2) {
int w = cursor_size;
int h = cursor_size;
- igt_fixture
+ igt_fixture {
+ igt_require(w <= data->cursor_max_w &&
+ h <= data->cursor_max_h);
+
create_cursor_fb(data, w, h);
+ }
/* Using created cursor FBs to test cursor support */
- igt_subtest_f("cursor-%dx%d-onscreen", w, h)
+ igt_subtest_f("pipe-%s-cursor-%dx%d-onscreen", kmstest_pipe_name(pipe), w, h)
run_test(data, test_crc_onscreen, w, h);
- igt_subtest_f("cursor-%dx%d-offscreen", w, h)
+ igt_subtest_f("pipe-%s-cursor-%dx%d-offscreen", kmstest_pipe_name(pipe), w, h)
run_test(data, test_crc_offscreen, w, h);
- igt_subtest_f("cursor-%dx%d-sliding", w, h)
+ igt_subtest_f("pipe-%s-cursor-%dx%d-sliding", kmstest_pipe_name(pipe), w, h)
run_test(data, test_crc_sliding, w, h);
- igt_subtest_f("cursor-%dx%d-random", w, h)
- run_test(data, test_crc_random, w, h);
- igt_subtest_f("cursor-%dx%d-dpms", w, h) {
- data->flags = TEST_DPMS;
+ igt_subtest_f("pipe-%s-cursor-%dx%d-random", kmstest_pipe_name(pipe), w, h)
run_test(data, test_crc_random, w, h);
- data->flags = 0;
- }
-
- igt_subtest_f("cursor-%dx%d-suspend", w, h) {
- data->flags = TEST_SUSPEND;
- run_test(data, test_crc_random, w, h);
- data->flags = 0;
- }
- igt_subtest_f("cursor-%dx%d-rapid-movement", w, h) {
+ igt_subtest_f("pipe-%s-cursor-%dx%d-rapid-movement", kmstest_pipe_name(pipe), w, h) {
run_test(data, test_rapid_movement, w, h);
}
@@ -684,23 +674,25 @@ static void run_test_generic(data_t *data)
*/
h /= 3;
- igt_fixture
- create_cursor_fb(data, w, h);
+ igt_fixture {
+ if (has_nonsquare_cursors(data))
+ create_cursor_fb(data, w, h);
+ }
/* Using created cursor FBs to test cursor support */
- igt_subtest_f("cursor-%dx%d-onscreen", w, h) {
+ igt_subtest_f("pipe-%s-cursor-%dx%d-onscreen", kmstest_pipe_name(pipe), w, h) {
igt_require(has_nonsquare_cursors(data));
run_test(data, test_crc_onscreen, w, h);
}
- igt_subtest_f("cursor-%dx%d-offscreen", w, h) {
+ igt_subtest_f("pipe-%s-cursor-%dx%d-offscreen", kmstest_pipe_name(pipe), w, h) {
igt_require(has_nonsquare_cursors(data));
run_test(data, test_crc_offscreen, w, h);
}
- igt_subtest_f("cursor-%dx%d-sliding", w, h) {
+ igt_subtest_f("pipe-%s-cursor-%dx%d-sliding", kmstest_pipe_name(pipe), w, h) {
igt_require(has_nonsquare_cursors(data));
run_test(data, test_crc_sliding, w, h);
}
- igt_subtest_f("cursor-%dx%d-random", w, h) {
+ igt_subtest_f("pipe-%s-cursor-%dx%d-random", kmstest_pipe_name(pipe), w, h) {
igt_require(has_nonsquare_cursors(data));
run_test(data, test_crc_random, w, h);
}
@@ -716,6 +708,7 @@ igt_main
{
uint64_t cursor_width = 64, cursor_height = 64;
int ret;
+ enum pipe pipe;
igt_skip_on_simulation();
@@ -741,18 +734,9 @@ igt_main
data.cursor_max_w = cursor_width;
data.cursor_max_h = cursor_height;
- igt_subtest_f("cursor-size-change")
- run_test(&data, test_cursor_size, cursor_width, cursor_height);
-
- igt_subtest_f("cursor-alpha-opaque") {
- run_test(&data, test_cursor_opaque, cursor_width, cursor_height);
- }
-
- igt_subtest_f("cursor-alpha-transparent") {
- run_test(&data, test_cursor_transparent, cursor_width, cursor_height);
- }
-
- run_test_generic(&data);
+ for_each_pipe_static(pipe)
+ igt_subtest_group
+ run_tests_on_pipe(&data, pipe);
igt_fixture {
igt_display_fini(&data.display);
diff --git a/tests/kms_cursor_edge_walk.c b/tests/kms_cursor_edge_walk.c
index 86629555..809dca42 100644
--- a/tests/kms_cursor_edge_walk.c
+++ b/tests/kms_cursor_edge_walk.c
@@ -285,31 +285,27 @@ static int opt_handler(int opt, int opt_index, void *_data)
data->jump = true;
break;
default:
- break;
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
static data_t data;
static uint64_t max_curw = 64, max_curh = 64;
+static const struct option long_opts[] = {
+ { .name = "colored", .val = 'c' },
+ { .name = "disable", .val = 'd'},
+ { .name = "jump", .val = 'j' },
+ {}
+};
+static const char *help_str =
+ " --colored\t\tUse a colored cursor (disables CRC checks)\n"
+ " --disable\t\tDisable the cursor between each step\n"
+ " --jump\t\tJump the cursor to middle of the screen between each step)\n";
-int main(int argc, char **argv)
+igt_main_args("", long_opts, help_str, opt_handler, &data)
{
- static const struct option long_opts[] = {
- { .name = "colored", .val = 'c' },
- { .name = "disable", .val = 'd'},
- { .name = "jump", .val = 'j' },
- {}
- };
- static const char *help_str =
- " --colored\t\tUse a colored cursor (disables CRC checks)\n"
- " --disable\t\tDisable the cursor between each step\n"
- " --jump\t\tJump the cursor to middle of the screen between each step)\n";
-
- igt_subtest_init_parse_opts(&argc, argv, "", long_opts, help_str,
- opt_handler, &data);
-
igt_skip_on_simulation();
igt_fixture {
@@ -370,6 +366,4 @@ int main(int argc, char **argv)
igt_fixture
igt_display_fini(&data.display);
-
- igt_exit();
}
diff --git a/tests/kms_dp_dsc.c b/tests/kms_dp_dsc.c
index 1bfefbf2..e2e3aaa0 100644
--- a/tests/kms_dp_dsc.c
+++ b/tests/kms_dp_dsc.c
@@ -311,5 +311,4 @@ igt_main
close(data.drm_fd);
igt_display_fini(&data.display);
}
- igt_exit();
}
diff --git a/tests/kms_flip.c b/tests/kms_flip.c
index 8c17c8da..2a158d97 100755
--- a/tests/kms_flip.c
+++ b/tests/kms_flip.c
@@ -71,6 +71,7 @@
#define TEST_SUSPEND (1 << 26)
#define TEST_BO_TOOBIG (1 << 28)
+#define TEST_NO_VBLANK (1 << 29)
#define TEST_BASIC (1 << 30)
#define EVENT_FLIP (1 << 0)
@@ -126,6 +127,18 @@ struct event_state {
int seq_step;
};
+static bool vblank_dependence(int flags)
+{
+ int vblank_flags = TEST_VBLANK | TEST_VBLANK_BLOCK |
+ TEST_VBLANK_ABSOLUTE | TEST_VBLANK_EXPIRED_SEQ |
+ TEST_CHECK_TS | TEST_VBLANK_RACE | TEST_EBUSY;
+
+ if (flags & vblank_flags)
+ return true;
+
+ return false;
+}
+
static float timeval_float(const struct timeval *tv)
{
return tv->tv_sec + tv->tv_usec / 1000000.0f;
@@ -494,7 +507,7 @@ static void check_state(const struct test_output *o, const struct event_state *e
/* check only valid if no modeset happens in between, that increments by
* (1 << 23) on each step. This bounding matches the one in
* DRM_IOCTL_WAIT_VBLANK. */
- if (!(o->flags & (TEST_DPMS | TEST_MODESET)))
+ if (!(o->flags & (TEST_DPMS | TEST_MODESET | TEST_NO_VBLANK)))
igt_assert_f(es->current_seq - (es->last_seq + o->seq_step) <= 1UL << 23,
"unexpected %s seq %u, should be >= %u\n",
es->name, es->current_seq, es->last_seq + o->seq_step);
@@ -1176,6 +1189,7 @@ static void run_test_on_crtc_set(struct test_output *o, int *crtc_idxs,
unsigned bo_size = 0;
uint64_t tiling;
int i;
+ bool vblank = true;
switch (crtc_count) {
case RUN_TEST:
@@ -1259,6 +1273,14 @@ static void run_test_on_crtc_set(struct test_output *o, int *crtc_idxs,
}
igt_assert(fb_is_bound(o, o->fb_ids[0]));
+ vblank = kms_has_vblank(drm_fd);
+ if (!vblank) {
+ if (vblank_dependence(o->flags))
+ igt_require_f(vblank, "There is no VBlank\n");
+ else
+ o->flags |= TEST_NO_VBLANK;
+ }
+
/* quiesce the hw a bit to ensure we don't miss a single frame */
if (o->flags & TEST_CHECK_TS)
calibrate_ts(o, crtc_idxs[0]);
@@ -1481,7 +1503,7 @@ static void test_nonblocking_read(int in)
close(fd);
}
-int main(int argc, char **argv)
+igt_main
{
struct {
int duration;
@@ -1530,8 +1552,6 @@ int main(int argc, char **argv)
};
int i;
- igt_subtest_init(argc, argv);
-
igt_fixture {
drm_fd = drm_open_driver_master(DRIVER_ANY);
@@ -1591,11 +1611,4 @@ int main(int argc, char **argv)
run_pair(tests[i].duration, tests[i].flags);
}
igt_stop_signal_helper();
-
- /*
- * Let drm_fd leak, since it's needed by the dpms restore
- * exit_handler and igt_exit() won't return.
- */
-
- igt_exit();
}
diff --git a/tests/kms_flip_tiling.c b/tests/kms_flip_tiling.c
index d1e6687f..582af53c 100644
--- a/tests/kms_flip_tiling.c
+++ b/tests/kms_flip_tiling.c
@@ -75,6 +75,14 @@ test_flip_tiling(data_t *data, enum pipe pipe, igt_output_t *output, uint64_t ti
igt_output_set_pipe(output, pipe);
mode = igt_output_get_mode(output);
+
+ /* Interlaced modes don't support Y/Yf tiling */
+ if (tiling[0] == LOCAL_I915_FORMAT_MOD_Y_TILED ||
+ tiling[0] == LOCAL_I915_FORMAT_MOD_Yf_TILED ||
+ tiling[1] == LOCAL_I915_FORMAT_MOD_Y_TILED ||
+ tiling[1] == LOCAL_I915_FORMAT_MOD_Yf_TILED)
+ igt_require(!(mode->flags & DRM_MODE_FLAG_INTERLACE));
+
primary = igt_output_get_plane(output, 0);
width = mode->hdisplay;
diff --git a/tests/kms_force_connector_basic.c b/tests/kms_force_connector_basic.c
index b8246e66..20812d5e 100644
--- a/tests/kms_force_connector_basic.c
+++ b/tests/kms_force_connector_basic.c
@@ -65,25 +65,23 @@ static int opt_handler(int opt, int opt_index, void *data)
break;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
-int main(int argc, char **argv)
+struct option long_opts[] = {
+ {"reset", 0, 0, 'r'},
+ {0, 0, 0, 0}
+};
+const char *help_str =
+ " --reset\t\tReset all connector force states and edid.\n";
+
+igt_main_args("", long_opts, help_str, opt_handler, NULL)
{
/* force the VGA output and test that it worked */
int drm_fd = 0;
drmModeRes *res;
drmModeConnector *vga_connector = NULL, *temp;
int start_n_modes, start_connection;
- struct option long_opts[] = {
- {"reset", 0, 0, 'r'},
- {0, 0, 0, 0}
- };
- const char *help_str =
- " --reset\t\tReset all connector force states and edid.\n";
-
- igt_subtest_init_parse_opts(&argc, argv, "", long_opts, help_str,
- opt_handler, NULL);
igt_fixture {
unsigned vga_connector_id = 0;
@@ -320,6 +318,4 @@ int main(int argc, char **argv)
reset_connectors();
}
-
- igt_exit();
}
diff --git a/tests/kms_frontbuffer_tracking.c b/tests/kms_frontbuffer_tracking.c
index ee13b138..1037faf8 100644
--- a/tests/kms_frontbuffer_tracking.c
+++ b/tests/kms_frontbuffer_tracking.c
@@ -3070,19 +3070,23 @@ static int opt_handler(int option, int option_index, void *data)
case 'x':
errno = 0;
opt.shared_fb_x_offset = strtol(optarg, NULL, 0);
- igt_assert(errno == 0);
+ if (errno != 0)
+ return IGT_OPT_HANDLER_ERROR;
break;
case 'y':
errno = 0;
opt.shared_fb_y_offset = strtol(optarg, NULL, 0);
- igt_assert(errno == 0);
+ if (errno != 0)
+ return IGT_OPT_HANDLER_ERROR;
break;
case '1':
- igt_assert_eq(opt.only_pipes, PIPE_COUNT);
+ if (opt.only_pipes != PIPE_COUNT)
+ return IGT_OPT_HANDLER_ERROR;
opt.only_pipes = PIPE_SINGLE;
break;
case '2':
- igt_assert_eq(opt.only_pipes, PIPE_COUNT);
+ if (opt.only_pipes != PIPE_COUNT)
+ return IGT_OPT_HANDLER_ERROR;
opt.only_pipes = PIPE_DUAL;
break;
case 'l':
@@ -3090,14 +3094,16 @@ static int opt_handler(int option, int option_index, void *data)
opt.tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
else if (!strcmp(optarg, "y"))
opt.tiling = LOCAL_I915_FORMAT_MOD_Y_TILED;
- else
- igt_assert_f(false, "Bad tiling value: %s\n", optarg);
+ else {
+ igt_warn("Bad tiling value: %s\n", optarg);
+ return IGT_OPT_HANDLER_ERROR;
+ }
break;
default:
- igt_assert(false);
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
const char *help_str =
@@ -3245,28 +3251,26 @@ static const char *flip_str(enum flip_type flip)
#define TEST_MODE_ITER_END } } } } } }
-int main(int argc, char *argv[])
+struct option long_options[] = {
+ { "no-status-check", 0, 0, 's'},
+ { "no-crc-check", 0, 0, 'c'},
+ { "no-fbc-compression-check", 0, 0, 'o'},
+ { "no-fbc-action-check", 0, 0, 'a'},
+ { "no-edp", 0, 0, 'e'},
+ { "use-small-modes", 0, 0, 'm'},
+ { "show-hidden", 0, 0, 'i'},
+ { "step", 0, 0, 't'},
+ { "shared-fb-x", 1, 0, 'x'},
+ { "shared-fb-y", 1, 0, 'y'},
+ { "1p-only", 0, 0, '1'},
+ { "2p-only", 0, 0, '2'},
+ { "tiling", 1, 0, 'l'},
+ { 0, 0, 0, 0 }
+};
+
+igt_main_args("", long_options, help_str, opt_handler, NULL)
{
struct test_mode t;
- struct option long_options[] = {
- { "no-status-check", 0, 0, 's'},
- { "no-crc-check", 0, 0, 'c'},
- { "no-fbc-compression-check", 0, 0, 'o'},
- { "no-fbc-action-check", 0, 0, 'a'},
- { "no-edp", 0, 0, 'e'},
- { "use-small-modes", 0, 0, 'm'},
- { "show-hidden", 0, 0, 'i'},
- { "step", 0, 0, 't'},
- { "shared-fb-x", 1, 0, 'x'},
- { "shared-fb-y", 1, 0, 'y'},
- { "1p-only", 0, 0, '1'},
- { "2p-only", 0, 0, '2'},
- { "tiling", 1, 0, 'l'},
- { 0, 0, 0, 0 }
- };
-
- igt_subtest_init_parse_opts(&argc, argv, "", long_options, help_str,
- opt_handler, NULL);
igt_fixture
setup_environment();
@@ -3473,6 +3477,4 @@ int main(int argc, char *argv[])
igt_fixture
teardown_environment();
-
- igt_exit();
}
diff --git a/tests/kms_hdmi_inject.c b/tests/kms_hdmi_inject.c
index 699bad5b..8c0d1333 100644
--- a/tests/kms_hdmi_inject.c
+++ b/tests/kms_hdmi_inject.c
@@ -22,14 +22,35 @@
*
*/
+#include "config.h"
+
#include <dirent.h>
+
#include "igt.h"
+#include "igt_edid.h"
+#include "igt_eld.h"
#define HDISPLAY_4K 3840
#define VDISPLAY_4K 2160
IGT_TEST_DESCRIPTION("Tests 4K and audio HDMI injection.");
+/**
+ * This collection of tests performs EDID and status injection tests. Injection
+ * forces a given EDID and status on a connector. The kernel will parse the
+ * forced EDID and we will check whether the correct metadata is exposed to
+ * userspace.
+ *
+ * Currently, this can be used to test:
+ *
+ * - 4K modes exposed via KMS
+ * - Audio capabilities of the monitor exposed via ALSA. EDID-Like Data (ELD)
+ * entries in /proc/asound are verified.
+ *
+ * Injection is performed on a disconnected connector.
+ */
+
+/** get_connector: get the first disconnected HDMI connector */
static drmModeConnector *
get_connector(int drm_fd, drmModeRes *res)
{
@@ -118,88 +139,17 @@ hdmi_inject_4k(int drm_fd, drmModeConnector *connector)
free(edid);
}
-static bool
-eld_entry_is_igt(const char* path)
-{
- FILE *in;
- char buf[1024];
- uint8_t eld_valid = 0;
- uint8_t mon_valid = 0;
-
- in = fopen(path, "r");
- if (!in)
- return false;
-
- memset(buf, 0, 1024);
-
- while ((fgets(buf, 1024, in)) != NULL) {
-
- char *line = buf;
-
- if (!strncasecmp(line, "eld_valid", 9) &&
- strstr(line, "1")) {
- eld_valid++;
- }
-
- if (!strncasecmp(line, "monitor_name", 12) &&
- strstr(line, "IGT")) {
- mon_valid++;
- }
- }
-
- fclose(in);
- if (mon_valid && eld_valid)
- return true;
-
- return false;
-}
-
-static bool
-eld_is_valid(void)
-{
- DIR *dir;
- struct dirent *snd_hda;
- int i;
-
- for (i = 0; i < 8; i++) {
- char cards[128];
-
- snprintf(cards, sizeof(cards), "/proc/asound/card%d", i);
- dir = opendir(cards);
- if (!dir)
- continue;
-
- while ((snd_hda = readdir(dir))) {
- char fpath[PATH_MAX];
-
- if (*snd_hda->d_name == '.' ||
- strstr(snd_hda->d_name, "eld") == 0)
- continue;
-
- snprintf(fpath, sizeof(fpath), "%s/%s", cards,
- snd_hda->d_name);
- if (eld_entry_is_igt(fpath)) {
- closedir(dir);
- return true;
- }
- }
- closedir(dir);
- }
-
- return false;
-}
-
static void
hdmi_inject_audio(int drm_fd, drmModeConnector *connector)
{
- unsigned char *edid;
+ const unsigned char *edid;
size_t length;
int fb_id, cid, ret, crtc_mask = -1;
struct igt_fb fb;
struct kmstest_connector_config config;
- kmstest_edid_add_audio(igt_kms_get_base_edid(), EDID_LENGTH, &edid,
- &length);
+ edid = igt_kms_get_hdmi_audio_edid();
+ length = AUDIO_EDID_LENGTH;
kmstest_force_edid(drm_fd, connector, edid, length);
@@ -233,7 +183,7 @@ hdmi_inject_audio(int drm_fd, drmModeConnector *connector)
* Test if we have /proc/asound/HDMI/eld#0.0 and whether its contents
* are valid.
*/
- igt_assert(eld_is_valid());
+ igt_assert(eld_has_igt());
igt_remove_fb(drm_fd, &fb);
@@ -242,8 +192,6 @@ hdmi_inject_audio(int drm_fd, drmModeConnector *connector)
kmstest_force_connector(drm_fd, connector, FORCE_CONNECTOR_UNSPECIFIED);
kmstest_force_edid(drm_fd, connector, NULL, 0);
-
- free(edid);
}
igt_main
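For reference, a minimal sketch of the audio-injection flow that the rewritten hdmi_inject_audio() above follows, using only helpers visible in this patch (igt_kms_get_hdmi_audio_edid(), kmstest_force_edid(), eld_has_igt()); the function name is illustrative, FORCE_CONNECTOR_ON is assumed from the existing IGT KMS helpers, and the modeset the real test performs before checking the ELD is omitted here:

/* Illustrative sketch, not part of the patch. */
static void force_audio_edid_and_check(int drm_fd, drmModeConnector *connector)
{
	const unsigned char *edid = igt_kms_get_hdmi_audio_edid();

	/* Force an audio-capable EDID on the disconnected connector. */
	kmstest_force_edid(drm_fd, connector, edid, AUDIO_EDID_LENGTH);
	kmstest_force_connector(drm_fd, connector, FORCE_CONNECTOR_ON);

	/* ...enable the display here, then expect ALSA to expose an IGT ELD. */
	igt_assert(eld_has_igt());

	/* Undo the forcing so later tests see the real connector state. */
	kmstest_force_connector(drm_fd, connector, FORCE_CONNECTOR_UNSPECIFIED);
	kmstest_force_edid(drm_fd, connector, NULL, 0);
}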
diff --git a/tests/kms_mmap_write_crc.c b/tests/kms_mmap_write_crc.c
index bf2b0c29..73606b16 100644
--- a/tests/kms_mmap_write_crc.c
+++ b/tests/kms_mmap_write_crc.c
@@ -258,18 +258,18 @@ static int opt_handler(int opt, int opt_index, void *data)
if (opt == 'n') {
ioctl_sync = false;
igt_info("set via cmd line to not use sync ioctls\n");
+ } else {
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
static data_t data;
-int main(int argc, char **argv)
+igt_main_args("n", NULL, NULL, opt_handler, NULL)
{
int i;
- igt_subtest_init_parse_opts(&argc, argv, "n", NULL, NULL,
- opt_handler, NULL);
igt_skip_on_simulation();
@@ -299,6 +299,4 @@ int main(int argc, char **argv)
igt_stop_helper(&hog);
}
-
- igt_exit();
}
diff --git a/tests/kms_plane.c b/tests/kms_plane.c
index 471bcbeb..59d5f1e8 100644
--- a/tests/kms_plane.c
+++ b/tests/kms_plane.c
@@ -389,6 +389,39 @@ static void set_legacy_lut(data_t *data, enum pipe pipe,
free(lut);
}
+static bool set_c8_legacy_lut(data_t *data, enum pipe pipe,
+ uint16_t mask)
+{
+ igt_pipe_t *pipe_obj = &data->display.pipes[pipe];
+ drmModeCrtc *crtc;
+ uint16_t *r, *g, *b;
+ int i, lut_size;
+
+ crtc = drmModeGetCrtc(data->drm_fd, pipe_obj->crtc_id);
+ lut_size = crtc->gamma_size;
+ drmModeFreeCrtc(crtc);
+
+ if (lut_size != 256)
+ return false;
+
+ r = malloc(sizeof(uint16_t) * 3 * lut_size);
+ g = r + lut_size;
+ b = g + lut_size;
+
+ /* igt_fb uses RGB332 for C8 */
+ for (i = 0; i < lut_size; i++) {
+ r[i] = (((i & 0xe0) >> 5) * 0xffff / 0x7) & mask;
+ g[i] = (((i & 0x1c) >> 2) * 0xffff / 0x7) & mask;
+ b[i] = (((i & 0x03) >> 0) * 0xffff / 0x3) & mask;
+ }
+
+ igt_assert_eq(drmModeCrtcSetGamma(data->drm_fd, pipe_obj->crtc_id,
+ lut_size, r, g, b), 0);
+
+ free(r);
+
+ return true;
+}
static void test_format_plane_color(data_t *data, enum pipe pipe,
igt_plane_t *plane,
@@ -448,6 +481,25 @@ static void test_format_plane_color(data_t *data, enum pipe pipe,
igt_remove_fb(data->drm_fd, &old_fb);
}
+static int num_unique_crcs(const igt_crc_t crc[], int num_crc)
+{
+ int num_unique_crc = 0;
+
+ for (int i = 0; i < num_crc; i++) {
+ int j;
+
+ for (j = i + 1; j < num_crc; j++) {
+ if (igt_check_crc_equal(&crc[i], &crc[j]))
+ break;
+ }
+
+ if (j == num_crc)
+ num_unique_crc++;
+ }
+
+ return num_unique_crc;
+}
+
static bool test_format_plane(data_t *data, enum pipe pipe,
igt_output_t *output, igt_plane_t *plane)
{
@@ -520,6 +572,13 @@ static bool test_format_plane(data_t *data, enum pipe pipe,
i, &ref_crc[i], &fb);
}
+ /*
+ * Make sure we have some difference between the colors. This
+ * at least avoids claiming success when everything is just
+ * black all the time (eg. if the plane is never even on).
+ */
+ igt_require(num_unique_crcs(ref_crc, ARRAY_SIZE(colors)) > 1);
+
for (int i = 0; i < plane->format_mod_count; i++) {
int crc_mismatch_count = 0;
int crc_mismatch_mask = 0;
@@ -532,8 +591,13 @@ static bool test_format_plane(data_t *data, enum pipe pipe,
modifier == ref_modifier)
continue;
- if (!igt_fb_supported_format(format))
- continue;
+ if (format == DRM_FORMAT_C8) {
+ if (!set_c8_legacy_lut(data, pipe, 0xfc00))
+ continue;
+ } else {
+ if (!igt_fb_supported_format(format))
+ continue;
+ }
igt_info("Testing format " IGT_FORMAT_FMT " / modifier 0x%" PRIx64 " on %s.%u\n",
IGT_FORMAT_ARGS(format), modifier,
@@ -552,6 +616,9 @@ static bool test_format_plane(data_t *data, enum pipe pipe,
}
}
+ if (format == DRM_FORMAT_C8)
+ set_legacy_lut(data, pipe, 0xfc00);
+
if (crc_mismatch_count)
igt_warn("CRC mismatches with format " IGT_FORMAT_FMT " on %s.%u with %d/%d solid colors tested (0x%X)\n",
IGT_FORMAT_ARGS(format), kmstest_pipe_name(pipe),
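As a sanity check on the RGB332 expansion in set_c8_legacy_lut() above, here is one entry worked by hand, using the 0xfc00 mask passed at the call site added by this patch:

/* Worked example (illustrative): pixel value i = 0xE3 = 0b111'000'11
 *   r[i] = ((0xE3 & 0xe0) >> 5) * 0xffff / 0x7 = 7 * 0xffff / 7 = 0xffff, & 0xfc00 -> 0xfc00
 *   g[i] = ((0xE3 & 0x1c) >> 2) * 0xffff / 0x7 = 0 * 0xffff / 7 = 0x0000, & 0xfc00 -> 0x0000
 *   b[i] = ((0xE3 & 0x03) >> 0) * 0xffff / 0x3 = 3 * 0xffff / 3 = 0xffff, & 0xfc00 -> 0xfc00
 * i.e. each 3/3/2 bit field is expanded to a full-range 16-bit gamma entry
 * before the mask clears the low bits.
 */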
diff --git a/tests/kms_plane_lowres.c b/tests/kms_plane_lowres.c
index 0b78573f..68b85025 100644
--- a/tests/kms_plane_lowres.c
+++ b/tests/kms_plane_lowres.c
@@ -262,6 +262,4 @@ igt_main
igt_fixture {
igt_display_fini(&data.display);
}
-
- igt_exit();
}
diff --git a/tests/kms_plane_multiple.c b/tests/kms_plane_multiple.c
index d2d02a5f..0d3ba4ff 100644
--- a/tests/kms_plane_multiple.c
+++ b/tests/kms_plane_multiple.c
@@ -80,16 +80,6 @@ static void test_fini(data_t *data, igt_output_t *output, int n_planes)
{
igt_pipe_crc_stop(data->pipe_crc);
- for (int i = 0; i < n_planes; i++) {
- igt_plane_t *plane = data->plane[i];
- if (!plane)
- continue;
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- continue;
- igt_plane_set_fb(plane, NULL);
- data->plane[i] = NULL;
- }
-
/* reset the constraint on the pipe */
igt_output_set_pipe(output, PIPE_ANY);
@@ -99,7 +89,8 @@ static void test_fini(data_t *data, igt_output_t *output, int n_planes)
free(data->plane);
data->plane = NULL;
- igt_remove_fb(data->drm_fd, data->fb);
+ free(data->fb);
+ data->fb = NULL;
igt_display_reset(&data->display);
}
@@ -195,6 +186,7 @@ prepare_planes(data_t *data, enum pipe pipe_id, color_t *color,
int *y;
int *size;
int i;
+ int* suffle;
igt_output_set_pipe(output, pipe_id);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
@@ -206,6 +198,34 @@ prepare_planes(data_t *data, enum pipe pipe_id, color_t *color,
igt_assert_f(y, "Failed to allocate %ld bytes for variable y\n", (long int) (pipe->n_planes * sizeof(*y)));
size = malloc(pipe->n_planes * sizeof(*size));
igt_assert_f(size, "Failed to allocate %ld bytes for variable size\n", (long int) (pipe->n_planes * sizeof(*size)));
+ suffle = malloc(pipe->n_planes * sizeof(*suffle));
+ igt_assert_f(suffle, "Failed to allocate %ld bytes for variable suffle\n", (long int) (pipe->n_planes * sizeof(*suffle)));
+
+ for (i = 0; i < pipe->n_planes; i++)
+ suffle[i] = i;
+
+ /*
+ * Shuffle table for the planes. Using rand() keeps the ordering
+ * randomized but reproducible for a given seed.
+ */
+ for (i = 0; i < 256; i++) {
+ int n, m;
+ int a, b;
+
+ n = rand() % (pipe->n_planes-1);
+ m = rand() % (pipe->n_planes-1);
+
+ /*
+ * Keep the primary plane in its place for the test's sake.
+ */
+ if (n == primary->index || m == primary->index)
+ continue;
+
+ a = suffle[n];
+ b = suffle[m];
+ suffle[n] = b;
+ suffle[m] = a;
+ }
mode = igt_output_get_mode(output);
@@ -213,7 +233,11 @@ prepare_planes(data_t *data, enum pipe pipe_id, color_t *color,
x[primary->index] = 0;
y[primary->index] = 0;
for (i = 0; i < max_planes; i++) {
- igt_plane_t *plane = igt_output_get_plane(output, i);
+ /*
+ * Here we assume that the primary plane will have
+ * index zero.
+ */
+ igt_plane_t *plane = igt_output_get_plane(output, suffle[i]);
uint32_t plane_format;
uint64_t plane_tiling;
@@ -251,6 +275,10 @@ prepare_planes(data_t *data, enum pipe pipe_id, color_t *color,
create_fb_for_mode_position(data, output, mode, color, x, y,
size, size, tiling, max_planes);
igt_plane_set_fb(data->plane[primary->index], &data->fb[primary->index]);
+ free((void*)x);
+ free((void*)y);
+ free((void*)size);
+ free((void*)suffle);
}
static void
@@ -260,7 +288,9 @@ test_plane_position_with_output(data_t *data, enum pipe pipe,
{
color_t blue = { 0.0f, 0.0f, 1.0f };
igt_crc_t crc;
+ igt_plane_t *plane;
int i;
+ int err, c = 0;
int iterations = opt.iterations < 1 ? 1 : opt.iterations;
bool loop_forever;
char info[256];
@@ -274,22 +304,46 @@ test_plane_position_with_output(data_t *data, enum pipe pipe,
iterations, iterations > 1 ? "iterations" : "iteration");
}
- igt_info("Testing connector %s using pipe %s with %d planes %s with seed %d\n",
- igt_output_name(output), kmstest_pipe_name(pipe), n_planes,
- info, opt.seed);
-
test_init(data, pipe, n_planes);
test_grab_crc(data, output, pipe, &blue, tiling);
+ /*
+ * Find out how many planes are allowed simultaneously
+ */
+ do {
+ c++;
+ prepare_planes(data, pipe, &blue, tiling, c, output);
+ err = igt_display_try_commit2(&data->display, COMMIT_ATOMIC);
+
+ for_each_plane_on_pipe(&data->display, pipe, plane)
+ igt_plane_set_fb(plane, NULL);
+
+ for (int x = 0; x < c; x++)
+ igt_remove_fb(data->drm_fd, &data->fb[x]);
+ } while (!err && c < n_planes);
+
+ if (err)
+ c--;
+
+ igt_info("Testing connector %s using pipe %s with %d planes %s with seed %d\n",
+ igt_output_name(output), kmstest_pipe_name(pipe), c,
+ info, opt.seed);
+
i = 0;
while (i < iterations || loop_forever) {
- prepare_planes(data, pipe, &blue, tiling, n_planes, output);
+ prepare_planes(data, pipe, &blue, tiling, c, output);
igt_display_commit2(&data->display, COMMIT_ATOMIC);
igt_pipe_crc_get_current(data->display.drm_fd, data->pipe_crc, &crc);
+ for_each_plane_on_pipe(&data->display, pipe, plane)
+ igt_plane_set_fb(plane, NULL);
+
+ for (int x = 0; x < c; x++)
+ igt_remove_fb(data->drm_fd, &data->fb[x]);
+
igt_assert_crc_equal(&data->ref_crc, &crc);
i++;
@@ -346,8 +400,8 @@ static int opt_handler(int option, int option_index, void *input)
opt.iterations = strtol(optarg, NULL, 0);
if (opt.iterations < LOOP_FOREVER || opt.iterations == 0) {
- igt_info("incorrect number of iterations\n");
- igt_assert(false);
+ igt_info("incorrect number of iterations: %d\n", opt.iterations);
+ return IGT_OPT_HANDLER_ERROR;
}
break;
@@ -356,28 +410,26 @@ static int opt_handler(int option, int option_index, void *input)
opt.seed = strtol(optarg, NULL, 0);
break;
default:
- igt_assert(false);
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
const char *help_str =
" --iterations Number of iterations for test coverage. -1 loop forever, default 64 iterations\n"
" --seed Seed for random number generator\n";
-int main(int argc, char *argv[])
+struct option long_options[] = {
+ { "iterations", required_argument, NULL, 'i'},
+ { "seed", required_argument, NULL, 's'},
+ { 0, 0, 0, 0 }
+};
+
+igt_main_args("", long_options, help_str, opt_handler, NULL)
{
- struct option long_options[] = {
- { "iterations", required_argument, NULL, 'i'},
- { "seed", required_argument, NULL, 's'},
- { 0, 0, 0, 0 }
- };
enum pipe pipe;
- igt_subtest_init_parse_opts(&argc, argv, "", long_options, help_str,
- opt_handler, NULL);
-
igt_skip_on_simulation();
igt_fixture {
@@ -396,6 +448,4 @@ int main(int argc, char *argv[])
igt_fixture {
igt_display_fini(&data.display);
}
-
- igt_exit();
}
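The prepare_planes() change above randomizes the plane order with 256 random swaps that skip the primary plane's index. For comparison only, a textbook Fisher-Yates shuffle that likewise leaves index 0 untouched (the patch itself assumes the primary plane has index 0) could look like the sketch below; it is illustrative, not what the patch applies:

/* Illustrative alternative, not part of the patch. */
static void shuffle_keep_first(int *idx, int n)
{
	/* Classic Fisher-Yates over positions 1..n-1; index 0 stays put. */
	for (int i = n - 1; i > 1; i--) {
		int j = 1 + rand() % i;	/* uniform in [1, i] */
		int tmp = idx[i];

		idx[i] = idx[j];
		idx[j] = tmp;
	}
}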
diff --git a/tests/kms_plane_scaling.c b/tests/kms_plane_scaling.c
index 3d247130..0bd36904 100644
--- a/tests/kms_plane_scaling.c
+++ b/tests/kms_plane_scaling.c
@@ -201,6 +201,26 @@ static bool can_rotate(data_t *d, unsigned format, uint64_t tiling,
return true;
}
+static bool can_scale(data_t *d, unsigned format)
+{
+ if (!is_i915_device(d->drm_fd))
+ return true;
+
+ switch (format) {
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ if (intel_gen(d->devid) >= 11)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_C8:
+ return false;
+ default:
+ return true;
+ }
+}
+
static void test_scaler_with_rotation_pipe(data_t *d, enum pipe pipe,
igt_output_t *output)
{
@@ -221,7 +241,8 @@ static void test_scaler_with_rotation_pipe(data_t *d, enum pipe pipe,
if (igt_fb_supported_format(format) &&
igt_plane_has_format_mod(plane, format, tiling) &&
- can_rotate(d, format, tiling, rot))
+ can_rotate(d, format, tiling, rot) &&
+ can_scale(d, format))
check_scaling_pipe_plane_rot(d, plane, format,
tiling, pipe,
output, rot);
@@ -255,7 +276,8 @@ static void test_scaler_with_pixel_format_pipe(data_t *d, enum pipe pipe, igt_ou
uint32_t format = plane->drm_plane->formats[j];
if (igt_fb_supported_format(format) &&
- igt_plane_has_format_mod(plane, format, tiling))
+ igt_plane_has_format_mod(plane, format, tiling) &&
+ can_scale(d, format))
check_scaling_pipe_plane_rot(d, plane,
format, tiling,
pipe, output, IGT_ROTATION_0);
@@ -490,13 +512,15 @@ test_scaler_with_clipping_clamping_scenario(data_t *d, enum pipe pipe, igt_outpu
for (int i = 0; i < d->plane1->drm_plane->count_formats; i++) {
unsigned f1 = d->plane1->drm_plane->formats[i];
- if (!igt_fb_supported_format(f1))
+ if (!igt_fb_supported_format(f1) ||
+ !can_scale(d, f1))
continue;
for (int j = 0; j < d->plane2->drm_plane->count_formats; j++) {
unsigned f2 = d->plane2->drm_plane->formats[j];
- if (!igt_fb_supported_format(f2))
+ if (!igt_fb_supported_format(f2) ||
+ !can_scale(d, f2))
continue;
__test_scaler_with_clipping_clamping_scenario(d, mode, f1, f2);
diff --git a/tests/kms_psr.c b/tests/kms_psr.c
index 3e16a6bf..39de0112 100644
--- a/tests/kms_psr.c
+++ b/tests/kms_psr.c
@@ -411,29 +411,28 @@ static int opt_handler(int opt, int opt_index, void *_data)
data->with_psr_disabled = true;
break;
default:
- igt_assert(0);
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
-int main(int argc, char *argv[])
+const char *help_str =
+ " --no-psr\tRun test without PSR/PSR2.";
+static struct option long_options[] = {
+ {"no-psr", 0, 0, 'n'},
+ { 0, 0, 0, 0 }
+};
+data_t data = {};
+
+igt_main_args("", long_options, help_str, opt_handler, &data)
{
- const char *help_str =
- " --no-psr\tRun test without PSR/PSR2.";
- static struct option long_options[] = {
- {"no-psr", 0, 0, 'n'},
- { 0, 0, 0, 0 }
- };
- data_t data = {};
enum operations op;
const char *append_subtest_name[2] = {
"",
"psr2_"
};
- igt_subtest_init_parse_opts(&argc, argv, "", long_options,
- help_str, opt_handler, &data);
igt_skip_on_simulation();
igt_fixture {
@@ -533,6 +532,4 @@ int main(int argc, char *argv[])
drm_intel_bufmgr_destroy(data.bufmgr);
display_fini(&data);
}
-
- igt_exit();
}
diff --git a/tests/kms_psr2_su.c b/tests/kms_psr2_su.c
index b9a57582..a9f675d1 100644
--- a/tests/kms_psr2_su.c
+++ b/tests/kms_psr2_su.c
@@ -228,12 +228,10 @@ static void cleanup(data_t *data)
igt_remove_fb(data->drm_fd, &data->fb[0]);
}
-int main(int argc, char *argv[])
+igt_main
{
data_t data = {};
- igt_subtest_init_parse_opts(&argc, argv, "", NULL,
- NULL, NULL, NULL);
igt_skip_on_simulation();
igt_fixture {
@@ -287,6 +285,4 @@ int main(int argc, char *argv[])
drm_intel_bufmgr_destroy(data.bufmgr);
display_fini(&data);
}
-
- igt_exit();
}
diff --git a/tests/kms_rotation_crc.c b/tests/kms_rotation_crc.c
index fc995d07..fe404810 100644
--- a/tests/kms_rotation_crc.c
+++ b/tests/kms_rotation_crc.c
@@ -805,14 +805,13 @@ igt_main
data.pos_y = 0;
igt_subtest_f("bad-pixel-format") {
- /*
- * gen11 enables RGB565 rotation for 90/270 degrees.
- * DRM_FORMAT_C8 fmt need to be enabled for IGT if want to run
- * this test on gen11 and later.
- */
- igt_require(gen >= 9 && gen < 11);
+ /* gen11 enables RGB565 rotation for 90/270 degrees, so
+ * apart from that, any other pixel format that doesn't
+ * support 90/270 degree rotation can be used on gen11+
+ * instead. */
+ igt_require(gen >= 9);
data.rotation = IGT_ROTATION_90;
- data.override_fmt = DRM_FORMAT_RGB565;
+ data.override_fmt = gen < 11 ? DRM_FORMAT_RGB565 : DRM_FORMAT_Y212;
test_plane_rotation(&data, DRM_PLANE_TYPE_PRIMARY, true);
}
data.override_fmt = 0;
diff --git a/tests/kms_selftest.c b/tests/kms_selftest.c
index f61ddd99..abc4bfe9 100644
--- a/tests/kms_selftest.c
+++ b/tests/kms_selftest.c
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
#include "igt.h"
#include "igt_kmod.h"
diff --git a/tests/kms_setmode.c b/tests/kms_setmode.c
index c40c723d..8ace587e 100644
--- a/tests/kms_setmode.c
+++ b/tests/kms_setmode.c
@@ -186,7 +186,7 @@ static void create_fb_for_crtc(struct crtc_config *crtc,
static void get_mode_for_crtc(struct crtc_config *crtc,
drmModeModeInfo *mode_ret)
{
- drmModeModeInfo mode;
+ drmModeModeInfo *mode;
int i;
/*
@@ -194,8 +194,8 @@ static void get_mode_for_crtc(struct crtc_config *crtc,
* connectors.
*/
for (i = 0; i < crtc->connector_count; i++) {
- mode = crtc->cconfs[i].default_mode;
- if (crtc_supports_mode(crtc, &mode))
+ mode = &crtc->cconfs[i].default_mode;
+ if (crtc_supports_mode(crtc, mode))
goto found;
}
@@ -204,19 +204,22 @@ static void get_mode_for_crtc(struct crtc_config *crtc,
* connectors.
*/
for (i = 0; i < crtc->cconfs[0].connector->count_modes; i++) {
- mode = crtc->cconfs[0].connector->modes[i];
- if (crtc_supports_mode(crtc, &mode))
+ mode = &crtc->cconfs[0].connector->modes[i];
+ if (crtc_supports_mode(crtc, mode))
goto found;
}
/*
- * If none is found then just pick the default mode of the first
- * connector and hope the other connectors can support it by scaling
- * etc.
+ * If none is found then just pick the default mode from all connectors
+ * with the smallest clock, and hope the other connectors can support it
+ * by scaling etc.
*/
- mode = crtc->cconfs[0].default_mode;
+ mode = &crtc->cconfs[0].default_mode;
+ for (i = 1; i < crtc->connector_count; i++)
+ if (crtc->cconfs[i].default_mode.clock < mode->clock)
+ mode = &crtc->cconfs[i].default_mode;
found:
- *mode_ret = mode;
+ *mode_ret = *mode;
}
static int get_encoder_idx(drmModeRes *resources, drmModeEncoder *encoder)
@@ -821,13 +824,17 @@ static int opt_handler(int opt, int opt_index, void *data)
filter_test_id = atoi(optarg);
break;
default:
- igt_assert(0);
+ return IGT_OPT_HANDLER_ERROR;
}
- return 0;
+ return IGT_OPT_HANDLER_SUCCESS;
}
-int main(int argc, char **argv)
+const char *help_str =
+ " -d\t\tDon't run any test, only print what would be done. (still needs DRM access)\n"
+ " -t <test id>\tRun only the test with this id.";
+
+igt_main_args("dt:", NULL, help_str, opt_handler, NULL)
{
const struct {
enum test_flags flags;
@@ -845,16 +852,7 @@ int main(int argc, char **argv)
{ TEST_INVALID | TEST_CLONE | TEST_SINGLE_CRTC_CLONE | TEST_STEALING,
"invalid-clone-single-crtc-stealing" }
};
- const char *help_str =
- " -d\t\tDon't run any test, only print what would be done. (still needs DRM access)\n"
- " -t <test id>\tRun only the test with this id.";
int i;
- int ret;
-
- ret = igt_subtest_init_parse_opts(&argc, argv, "dt:", NULL, help_str,
- opt_handler, NULL);
- if (ret < 0)
- return ret == -1 ? 0 : ret;
igt_skip_on_simulation();
@@ -886,6 +884,4 @@ int main(int argc, char **argv)
close(drm_fd);
}
-
- igt_exit();
}
diff --git a/tests/kms_tv_load_detect.c b/tests/kms_tv_load_detect.c
index 012d0629..89f587d5 100644
--- a/tests/kms_tv_load_detect.c
+++ b/tests/kms_tv_load_detect.c
@@ -26,15 +26,13 @@
IGT_TEST_DESCRIPTION("Check tv load detection works correctly.");
-int main(int argc, char **argv)
+igt_main
{
/* force the VGA output and test that it worked */
int drm_fd = 0;
drmModeRes *res;
drmModeConnector *tv_connector = NULL, *temp;
- igt_subtest_init(argc, argv);
-
igt_fixture {
drm_fd = drm_open_driver_master(DRIVER_INTEL);
@@ -87,6 +85,4 @@ int main(int argc, char **argv)
drmModeFreeConnector(tv_connector);
close(drm_fd);
}
-
- igt_exit();
}
diff --git a/tests/meson.build b/tests/meson.build
index 711979b4..f168fbba 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -14,6 +14,7 @@ test_progs = [
'kms_atomic_interruptible',
'kms_atomic_transition',
'kms_available_modes_crc',
+ 'kms_big_fb',
'kms_busy',
'kms_ccs',
'kms_color',
@@ -63,6 +64,10 @@ test_progs = [
'kms_vblank',
'kms_vrr',
'meta_test',
+ 'panfrost_get_param',
+ 'panfrost_gem_new',
+ 'panfrost_prime',
+ 'panfrost_submit',
'perf',
'prime_busy',
'prime_mmap',
@@ -109,10 +114,13 @@ i915_progs = [
'gem_cs_prefetch',
'gem_cs_tlb',
'gem_ctx_bad_destroy',
+ 'gem_ctx_clone',
'gem_ctx_create',
+ 'gem_ctx_engines',
'gem_ctx_exec',
'gem_ctx_isolation',
'gem_ctx_param',
+ 'gem_ctx_shared',
'gem_ctx_switch',
'gem_ctx_thrash',
'gem_double_irq_loop',
@@ -210,6 +218,7 @@ i915_progs = [
'gem_unfence_active_buffers',
'gem_unref_active_buffers',
'gem_userptr_blits',
+ 'gem_vm_create',
'gem_wait',
'gem_workarounds',
'gem_write_read_ring_switch',
@@ -239,7 +248,7 @@ if libdrm_nouveau.found()
test_deps += libdrm_nouveau
endif
-if chamelium_found
+if chamelium.found()
test_progs += [
'kms_chamelium',
]
@@ -284,6 +293,13 @@ test_executables += executable('gem_eio',
install : true)
test_list += 'gem_eio'
+test_executables += executable('gem_exec_balancer', 'i915/gem_exec_balancer.c',
+ dependencies : test_deps + [ lib_igt_perf ],
+ install_dir : libexecdir,
+ install_rpath : libexecdir_rpathdir,
+ install : true)
+test_list += 'gem_exec_balancer'
+
test_executables += executable('gem_mocs_settings',
join_paths('i915', 'gem_mocs_settings.c'),
dependencies : test_deps + [ lib_igt_perf ],
diff --git a/tests/panfrost_gem_new.c b/tests/panfrost_gem_new.c
new file mode 100644
index 00000000..940525ff
--- /dev/null
+++ b/tests/panfrost_gem_new.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright © 2016 Broadcom
+ * Copyright © 2019 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "igt_panfrost.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include "panfrost_drm.h"
+
+igt_main
+{
+ int fd;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_PANFROST);
+ }
+
+ igt_subtest("gem-new-4096") {
+ struct panfrost_bo *bo = igt_panfrost_gem_new(fd, 4096);
+ igt_panfrost_free_bo(fd, bo);
+ }
+
+ igt_subtest("gem-new-0") {
+ struct drm_panfrost_create_bo arg = {
+ .size = 0,
+ };
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_CREATE_BO, &arg, EINVAL);
+ }
+
+ igt_subtest("gem-new-zeroed") {
+ int fd2 = drm_open_driver(DRIVER_PANFROST);
+ struct panfrost_bo *bo;
+ uint32_t *map;
+ /* A size different from any used in our other tests, to try
+ * to convince it to land as the only one of its size in the
+ * kernel BO cache
+ */
+ size_t size = 3 * 4096, i;
+
+ /* Make a BO and free it on our main fd. */
+ bo = igt_panfrost_gem_new(fd, size);
+ map = igt_panfrost_mmap_bo(fd, bo->handle, size, PROT_READ | PROT_WRITE);
+ memset(map, 0xd0, size);
+ munmap(map, size);
+ igt_panfrost_free_bo(fd, bo);
+
+ /* Now, allocate a BO on the other fd and make sure it doesn't
+ * have the old contents.
+ */
+ bo = igt_panfrost_gem_new(fd2, size);
+ map = igt_panfrost_mmap_bo(fd2, bo->handle, size, PROT_READ | PROT_WRITE);
+ for (i = 0; i < size / 4; i++)
+ igt_assert_eq_u32(map[i], 0x0);
+ munmap(map, size);
+ igt_panfrost_free_bo(fd2, bo);
+
+ close(fd2);
+ }
+
+ igt_fixture
+ close(fd);
+}
diff --git a/tests/panfrost_get_param.c b/tests/panfrost_get_param.c
new file mode 100644
index 00000000..11c2632b
--- /dev/null
+++ b/tests/panfrost_get_param.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2017 Broadcom
+ * Copyright © 2019 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "igt_panfrost.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <poll.h>
+#include "panfrost_drm.h"
+
+igt_main
+{
+ int fd;
+
+ igt_fixture
+ fd = drm_open_driver(DRIVER_PANFROST);
+
+ igt_subtest("base-params") {
+ int last_base_param = DRM_PANFROST_PARAM_GPU_PROD_ID;
+ uint32_t results[last_base_param + 1];
+
+ for (int i = 0; i < ARRAY_SIZE(results); i++)
+ results[i] = igt_panfrost_get_param(fd, i);
+
+ igt_assert(results[DRM_PANFROST_PARAM_GPU_PROD_ID]);
+ }
+
+ igt_subtest("get-bad-param") {
+ struct drm_panfrost_get_param get = {
+ .param = 0xd0d0d0d0,
+ };
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get, EINVAL);
+ }
+
+ igt_subtest("get-bad-padding") {
+ struct drm_panfrost_get_param get = {
+ .param = DRM_PANFROST_PARAM_GPU_PROD_ID,
+ .pad = 1,
+ };
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get, EINVAL);
+ }
+
+ igt_fixture
+ close(fd);
+}
diff --git a/tests/panfrost_prime.c b/tests/panfrost_prime.c
new file mode 100644
index 00000000..351d46f2
--- /dev/null
+++ b/tests/panfrost_prime.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2016 Broadcom
+ * Copyright © 2019 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "igt_panfrost.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include "panfrost_drm.h"
+
+igt_main
+{
+ int fd, kms_fd;
+
+ igt_fixture {
+ kms_fd = drm_open_driver_master(DRIVER_ANY);
+ fd = drm_open_driver(DRIVER_PANFROST);
+ }
+
+ igt_subtest("gem-prime-import") {
+ struct panfrost_bo *bo;
+ uint32_t handle, dumb_handle;
+ struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
+ int dmabuf_fd;
+
+ /* Allocate a BO first, just to be sure that the dumb buffer we
+ * import below gets a non-NULL address.
+ */
+ bo = igt_panfrost_gem_new(fd, 1024);
+
+ dumb_handle = kmstest_dumb_create(kms_fd, 1024, 1024, 32, NULL, NULL);
+
+ dmabuf_fd = prime_handle_to_fd(kms_fd, dumb_handle);
+
+ handle = prime_fd_to_handle(fd, dmabuf_fd);
+
+ get_bo_offset.handle = handle;
+ do_ioctl(fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
+ igt_assert(get_bo_offset.offset);
+
+ gem_close(fd, handle);
+
+ kmstest_dumb_destroy(kms_fd, dumb_handle);
+
+ igt_panfrost_free_bo(fd, bo);
+ }
+
+ igt_fixture {
+ close(fd);
+ close(kms_fd);
+ }
+}
diff --git a/tests/panfrost_submit.c b/tests/panfrost_submit.c
new file mode 100644
index 00000000..13ce85b7
--- /dev/null
+++ b/tests/panfrost_submit.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright © 2016 Broadcom
+ * Copyright © 2019 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "igt_panfrost.h"
+#include "igt_syncobj.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include "panfrost-job.h"
+#include "panfrost_drm.h"
+
+#define WIDTH 1920
+#define HEIGHT 1080
+#define CLEAR_COLOR 0xff7f7f7f
+
+/* One tenth of a second */
+#define SHORT_TIME_NSEC 100000000ull
+
+/* Add the time that the bad job takes to time out (sched->timeout) and the time that a reset can take */
+#define BAD_JOB_TIME_NSEC (SHORT_TIME_NSEC + 500000000ull + 100000000ull)
+
+#define NSECS_PER_SEC 1000000000ull
+
+static uint64_t
+abs_timeout(uint64_t duration)
+{
+ struct timespec current;
+ clock_gettime(CLOCK_MONOTONIC, &current);
+ return (uint64_t)current.tv_sec * NSECS_PER_SEC + current.tv_nsec + duration;
+}
+
+static void check_error(int fd, struct panfrost_submit *submit)
+{
+ struct mali_job_descriptor_header *header;
+
+ header = submit->submit_bo->map;
+ igt_assert_eq_u64(header->fault_pointer, 0);
+}
+
+static void check_fb(int fd, struct panfrost_bo *bo)
+{
+ int gpu_prod_id = igt_panfrost_get_param(fd, DRM_PANFROST_PARAM_GPU_PROD_ID);
+ __uint32_t *fbo;
+ int i;
+
+ fbo = bo->map;
+
+ if (gpu_prod_id >= 0x0750) {
+ for (i = 0; i < ALIGN(WIDTH, 16) * HEIGHT; i++)
+ igt_assert_eq_u32(fbo[i], CLEAR_COLOR);
+ } else {
+ // Mask the alpha away because on <=T720 we don't know how to get it
+ for (i = 0; i < ALIGN(WIDTH, 16) * HEIGHT; i++)
+ igt_assert_eq_u32(fbo[i], CLEAR_COLOR & 0x00ffffff);
+ }
+}
+
+igt_main
+{
+ int fd;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_PANFROST);
+ }
+
+ igt_subtest("pan-submit") {
+ struct panfrost_submit *submit;
+
+ submit = igt_panfrost_trivial_job(fd, false, WIDTH, HEIGHT,
+ CLEAR_COLOR);
+
+ igt_panfrost_bo_mmap(fd, submit->fbo);
+ do_ioctl(fd, DRM_IOCTL_PANFROST_SUBMIT, submit->args);
+ igt_assert(syncobj_wait(fd, &submit->args->out_sync, 1,
+ abs_timeout(SHORT_TIME_NSEC), 0, NULL));
+ check_error(fd, submit);
+ check_fb(fd, submit->fbo);
+ igt_panfrost_free_job(fd, submit);
+ }
+
+ igt_subtest("pan-submit-error-no-jc") {
+ struct drm_panfrost_submit submit = {.jc = 0,};
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit, EINVAL);
+ }
+
+ igt_subtest("pan-submit-error-bad-in-syncs") {
+ struct panfrost_submit *submit;
+
+ submit = igt_panfrost_trivial_job(fd, false, WIDTH, HEIGHT,
+ CLEAR_COLOR);
+ submit->args->in_syncs = 0ULL;
+ submit->args->in_sync_count = 1;
+
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_SUBMIT, submit->args, EFAULT);
+ }
+
+ igt_subtest("pan-submit-error-bad-bo-handles") {
+ struct panfrost_submit *submit;
+
+ submit = igt_panfrost_trivial_job(fd, false, WIDTH, HEIGHT,
+ CLEAR_COLOR);
+ submit->args->bo_handles = 0ULL;
+ submit->args->bo_handle_count = 1;
+
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_SUBMIT, submit->args, EFAULT);
+ }
+
+ igt_subtest("pan-submit-error-bad-requirements") {
+ struct panfrost_submit *submit;
+
+ submit = igt_panfrost_trivial_job(fd, false, WIDTH, HEIGHT,
+ CLEAR_COLOR);
+ submit->args->requirements = 2;
+
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_SUBMIT, submit->args, EINVAL);
+ }
+
+ igt_subtest("pan-submit-error-bad-out-sync") {
+ struct panfrost_submit *submit;
+
+ submit = igt_panfrost_trivial_job(fd, false, WIDTH, HEIGHT,
+ CLEAR_COLOR);
+ submit->args->out_sync = -1;
+
+ do_ioctl_err(fd, DRM_IOCTL_PANFROST_SUBMIT, submit->args, ENODEV);
+ }
+
+ igt_subtest("pan-reset") {
+ struct panfrost_submit *submit;
+
+ submit = igt_panfrost_trivial_job(fd, true, WIDTH, HEIGHT,
+ CLEAR_COLOR);
+ do_ioctl(fd, DRM_IOCTL_PANFROST_SUBMIT, submit->args);
+ /* Expect this job to time out */
+ igt_assert(!syncobj_wait(fd, &submit->args->out_sync, 1,
+ abs_timeout(SHORT_TIME_NSEC), 0, NULL));
+ igt_panfrost_free_job(fd, submit);
+
+ submit = igt_panfrost_trivial_job(fd, false, WIDTH, HEIGHT,
+ CLEAR_COLOR);
+ igt_panfrost_bo_mmap(fd, submit->fbo);
+ do_ioctl(fd, DRM_IOCTL_PANFROST_SUBMIT, submit->args);
+ /* This one should work */
+ igt_assert(syncobj_wait(fd, &submit->args->out_sync, 1,
+ abs_timeout(BAD_JOB_TIME_NSEC), 0, NULL));
+ check_fb(fd, submit->fbo);
+ igt_panfrost_free_job(fd, submit);
+ }
+
+ igt_fixture {
+ close(fd);
+ }
+}
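The timeout budget used by the pan-reset subtest above works out as follows, taking the values straight from the macros in this new file (the 0.5 s scheduler timeout is the allowance stated in the BAD_JOB_TIME_NSEC comment, not a value queried from the kernel):

/* SHORT_TIME_NSEC   = 100000000 ns = 0.1 s  (wait granted to a good job)
 * scheduler timeout = 500000000 ns = 0.5 s  (assumed sched->timeout allowance)
 * reset allowance   = 100000000 ns = 0.1 s
 * BAD_JOB_TIME_NSEC = 0.1 s + 0.5 s + 0.1 s = 0.7 s
 */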
diff --git a/tests/perf.c b/tests/perf.c
index f2c0aeb8..5ad8b2db 100644
--- a/tests/perf.c
+++ b/tests/perf.c
@@ -3670,7 +3670,7 @@ test_invalid_create_userspace_config(void)
igt_assert_eq(__i915_perf_add_config(drm_fd, &config), -EINVAL);
/* invalid mux_regs */
- strncpy(config.uuid, uuid, sizeof(config.uuid));
+ memcpy(config.uuid, uuid, sizeof(config.uuid));
config.n_mux_regs = 1;
config.mux_regs_ptr = to_user_pointer(invalid_mux_regs);
config.n_boolean_regs = 0;
@@ -3679,7 +3679,7 @@ test_invalid_create_userspace_config(void)
igt_assert_eq(__i915_perf_add_config(drm_fd, &config), -EINVAL);
/* empty config */
- strncpy(config.uuid, uuid, sizeof(config.uuid));
+ memcpy(config.uuid, uuid, sizeof(config.uuid));
config.n_mux_regs = 0;
config.mux_regs_ptr = to_user_pointer(mux_regs);
config.n_boolean_regs = 0;
@@ -3688,7 +3688,7 @@ test_invalid_create_userspace_config(void)
igt_assert_eq(__i915_perf_add_config(drm_fd, &config), -EINVAL);
/* empty config with null pointers */
- strncpy(config.uuid, uuid, sizeof(config.uuid));
+ memcpy(config.uuid, uuid, sizeof(config.uuid));
config.n_mux_regs = 1;
config.mux_regs_ptr = to_user_pointer(NULL);
config.n_boolean_regs = 2;
@@ -3699,7 +3699,7 @@ test_invalid_create_userspace_config(void)
igt_assert_eq(__i915_perf_add_config(drm_fd, &config), -EINVAL);
/* invalid pointers */
- strncpy(config.uuid, uuid, sizeof(config.uuid));
+ memcpy(config.uuid, uuid, sizeof(config.uuid));
config.n_mux_regs = 42;
config.mux_regs_ptr = to_user_pointer((void *) 0xDEADBEEF);
config.n_boolean_regs = 0;
@@ -3786,7 +3786,7 @@ test_create_destroy_userspace_config(void)
i915_perf_remove_config(drm_fd, config_id);
memset(&config, 0, sizeof(config));
- strncpy(config.uuid, uuid, sizeof(config.uuid));
+ memcpy(config.uuid, uuid, sizeof(config.uuid));
config.n_mux_regs = 1;
config.mux_regs_ptr = to_user_pointer(mux_regs);
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index e719a292..72b9166a 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -82,7 +82,7 @@ init(int gem_fd, const struct intel_execution_engine2 *e, uint8_t sample)
if (fd < 0)
err = errno;
- exists = gem_has_engine(gem_fd, e->class, e->instance);
+ exists = gem_context_has_engine(gem_fd, 0, e->flags);
if (intel_gen(intel_get_drm_devid(gem_fd)) < 6 &&
sample == I915_SAMPLE_SEMA)
exists = false;
@@ -158,11 +158,6 @@ static unsigned int measured_usleep(unsigned int usec)
return igt_nsec_elapsed(&ts);
}
-static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
-{
- return gem_class_instance_to_eb_flags(gem_fd, e->class, e->instance);
-}
-
#define TEST_BUSY (1)
#define FLAG_SYNC (2)
#define TEST_TRAILING_IDLE (4)
@@ -170,14 +165,15 @@ static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
#define FLAG_LONG (16)
#define FLAG_HANG (32)
-static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx,
+ const struct intel_execution_engine2 *e)
{
struct igt_spin_factory opts = {
.ctx = ctx,
- .engine = flags,
+ .engine = e->flags,
};
- if (gem_can_store_dword(fd, flags))
+ if (gem_class_can_store_dword(fd, e->class))
opts.flags |= IGT_SPIN_POLL_RUN;
return __igt_spin_factory(fd, &opts);
@@ -209,20 +205,34 @@ static unsigned long __spin_wait(int fd, igt_spin_t *spin)
return igt_nsec_elapsed(&start);
}
-static igt_spin_t * __spin_sync(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * __spin_sync(int fd, uint32_t ctx,
+ const struct intel_execution_engine2 *e)
{
- igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+ igt_spin_t *spin = __spin_poll(fd, ctx, e);
__spin_wait(fd, spin);
return spin;
}
-static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * spin_sync(int fd, uint32_t ctx,
+ const struct intel_execution_engine2 *e)
{
igt_require_gem(fd);
- return __spin_sync(fd, ctx, flags);
+ return __spin_sync(fd, ctx, e);
+}
+
+static igt_spin_t * spin_sync_flags(int fd, uint32_t ctx, unsigned int flags)
+{
+ struct intel_execution_engine2 e = { };
+
+ e.class = gem_execbuf_flags_to_engine_class(flags);
+ e.instance = (flags & (I915_EXEC_BSD_MASK | I915_EXEC_RING_MASK)) ==
+ (I915_EXEC_BSD | I915_EXEC_BSD_RING2) ? 1 : 0;
+ e.flags = flags;
+
+ return spin_sync(fd, ctx, &e);
}
static void end_spin(int fd, igt_spin_t *spin, unsigned int flags)
@@ -267,7 +277,7 @@ single(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
if (flags & TEST_BUSY)
- spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+ spin = spin_sync(gem_fd, 0, e);
else
spin = NULL;
@@ -316,7 +326,7 @@ busy_start(int gem_fd, const struct intel_execution_engine2 *e)
*/
sleep(2);
- spin = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+ spin = __spin_sync(gem_fd, 0, e);
fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
@@ -347,6 +357,7 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
int fd;
ctx = gem_context_create(gem_fd);
+ gem_context_set_all_engines(gem_fd, ctx);
/*
* Defeat the busy stats delayed disable, we need to guarantee we are
@@ -359,11 +370,11 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
* re-submission in execlists mode. Make sure busyness is correctly
* reported with the engine busy, and after the engine went idle.
*/
- spin[0] = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+ spin[0] = __spin_sync(gem_fd, 0, e);
usleep(500e3);
spin[1] = __igt_spin_new(gem_fd,
.ctx = ctx,
- .engine = e2ring(gem_fd, e));
+ .engine = e->flags);
/*
* Open PMU as fast as possible after the second spin batch in attempt
@@ -424,7 +435,7 @@ static void
busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
const unsigned int num_engines, unsigned int flags)
{
- const struct intel_execution_engine2 *e_;
+ struct intel_execution_engine2 *e_;
uint64_t tval[2][num_engines];
unsigned int busy_idx = 0, i;
uint64_t val[num_engines];
@@ -434,8 +445,8 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
i = 0;
fd[0] = -1;
- for_each_engine_class_instance(gem_fd, e_) {
- if (e == e_)
+ __for_each_physical_engine(gem_fd, e_) {
+ if (e->class == e_->class && e->instance == e_->instance)
busy_idx = i;
fd[i++] = open_group(I915_PMU_ENGINE_BUSY(e_->class,
@@ -445,7 +456,7 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
igt_assert_eq(i, num_engines);
- spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+ spin = spin_sync(gem_fd, 0, e);
pmu_read_multi(fd[0], num_engines, tval[0]);
slept = measured_usleep(batch_duration_ns / 1000);
if (flags & TEST_TRAILING_IDLE)
@@ -478,7 +489,7 @@ __submit_spin(int gem_fd, igt_spin_t *spin,
struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
eb.flags &= ~(0x3f | I915_EXEC_BSD_MASK);
- eb.flags |= e2ring(gem_fd, e) | I915_EXEC_NO_RELOC;
+ eb.flags |= e->flags | I915_EXEC_NO_RELOC;
eb.batch_start_offset += offset;
gem_execbuf(gem_fd, &eb);
@@ -488,7 +499,7 @@ static void
most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
const unsigned int num_engines, unsigned int flags)
{
- const struct intel_execution_engine2 *e_;
+ struct intel_execution_engine2 *e_;
uint64_t tval[2][num_engines];
uint64_t val[num_engines];
int fd[num_engines];
@@ -497,13 +508,13 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
unsigned int idle_idx, i;
i = 0;
- for_each_engine_class_instance(gem_fd, e_) {
- if (e == e_)
+ __for_each_physical_engine(gem_fd, e_) {
+ if (e->class == e_->class && e->instance == e_->instance)
idle_idx = i;
else if (spin)
__submit_spin(gem_fd, spin, e_, 64);
else
- spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e_));
+ spin = __spin_poll(gem_fd, 0, e_);
val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
}
@@ -545,7 +556,7 @@ static void
all_busy_check_all(int gem_fd, const unsigned int num_engines,
unsigned int flags)
{
- const struct intel_execution_engine2 *e;
+ struct intel_execution_engine2 *e;
uint64_t tval[2][num_engines];
uint64_t val[num_engines];
int fd[num_engines];
@@ -554,11 +565,11 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
unsigned int i;
i = 0;
- for_each_engine_class_instance(gem_fd, e) {
+ __for_each_physical_engine(gem_fd, e) {
if (spin)
__submit_spin(gem_fd, spin, e, 64);
else
- spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e));
+ spin = __spin_poll(gem_fd, 0, e);
val[i++] = I915_PMU_ENGINE_BUSY(e->class, e->instance);
}
@@ -602,7 +613,7 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
open_group(I915_PMU_ENGINE_WAIT(e->class, e->instance), fd);
if (flags & TEST_BUSY)
- spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+ spin = spin_sync(gem_fd, 0, e);
else
spin = NULL;
@@ -689,7 +700,7 @@ sema_wait(int gem_fd, const struct intel_execution_engine2 *e,
eb.buffer_count = 2;
eb.buffers_ptr = to_user_pointer(obj);
- eb.flags = e2ring(gem_fd, e);
+ eb.flags = e->flags;
/**
* Start the semaphore wait PMU and after some known time let the above
@@ -845,7 +856,7 @@ event_wait(int gem_fd, const struct intel_execution_engine2 *e)
eb.buffer_count = 1;
eb.buffers_ptr = to_user_pointer(&obj);
- eb.flags = e2ring(gem_fd, e) | I915_EXEC_SECURE;
+ eb.flags = e->flags | I915_EXEC_SECURE;
for_each_pipe_with_valid_output(&data.display, p, output) {
struct igt_helper_process waiter = { };
@@ -936,7 +947,7 @@ multi_client(int gem_fd, const struct intel_execution_engine2 *e)
*/
fd[1] = open_pmu(config);
- spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+ spin = spin_sync(gem_fd, 0, e);
val[0] = val[1] = __pmu_read_single(fd[0], &ts[0]);
slept[1] = measured_usleep(batch_duration_ns / 1000);
@@ -1052,8 +1063,8 @@ static void cpu_hotplug(int gem_fd)
* Create two spinners so test can ensure shorter gaps in engine
* busyness as it is terminating one and re-starting the other.
*/
- spin[0] = igt_spin_new(gem_fd, .engine = I915_EXEC_RENDER);
- spin[1] = __igt_spin_new(gem_fd, .engine = I915_EXEC_RENDER);
+ spin[0] = igt_spin_new(gem_fd, .engine = I915_EXEC_DEFAULT);
+ spin[1] = __igt_spin_new(gem_fd, .engine = I915_EXEC_DEFAULT);
val = __pmu_read_single(fd, &ts[0]);
@@ -1137,7 +1148,7 @@ static void cpu_hotplug(int gem_fd)
igt_spin_free(gem_fd, spin[cur]);
spin[cur] = __igt_spin_new(gem_fd,
- .engine = I915_EXEC_RENDER);
+ .engine = I915_EXEC_DEFAULT);
cur ^= 1;
}
@@ -1175,7 +1186,7 @@ test_interrupts(int gem_fd)
/* Queue spinning batches. */
for (int i = 0; i < target; i++) {
spin[i] = __igt_spin_new(gem_fd,
- .engine = I915_EXEC_RENDER,
+ .engine = I915_EXEC_DEFAULT,
.flags = IGT_SPIN_FENCE_OUT);
if (i == 0) {
fence_fd = spin[i]->out_fence;
@@ -1301,7 +1312,7 @@ test_frequency(int gem_fd)
igt_require(igt_sysfs_get_u32(sysfs, "gt_boost_freq_mhz") == min_freq);
gem_quiescent_gpu(gem_fd); /* Idle to be sure the change takes effect */
- spin = spin_sync(gem_fd, 0, I915_EXEC_RENDER);
+ spin = spin_sync_flags(gem_fd, 0, I915_EXEC_DEFAULT);
slept = pmu_read_multi(fd, 2, start);
measured_usleep(batch_duration_ns / 1000);
@@ -1327,7 +1338,7 @@ test_frequency(int gem_fd)
igt_require(igt_sysfs_get_u32(sysfs, "gt_min_freq_mhz") == max_freq);
gem_quiescent_gpu(gem_fd);
- spin = spin_sync(gem_fd, 0, I915_EXEC_RENDER);
+ spin = spin_sync_flags(gem_fd, 0, I915_EXEC_DEFAULT);
slept = pmu_read_multi(fd, 2, start);
measured_usleep(batch_duration_ns / 1000);
@@ -1458,14 +1469,14 @@ test_enable_race(int gem_fd, const struct intel_execution_engine2 *e)
int fd;
igt_require(gem_has_execlists(gem_fd));
- igt_require(gem_has_engine(gem_fd, e->class, e->instance));
+ igt_require(gem_context_has_engine(gem_fd, 0, e->flags));
obj.handle = gem_create(gem_fd, 4096);
gem_write(gem_fd, obj.handle, 0, &bbend, sizeof(bbend));
eb.buffer_count = 1;
eb.buffers_ptr = to_user_pointer(&obj);
- eb.flags = e2ring(gem_fd, e);
+ eb.flags = e->flags;
/*
* This test is probabilistic so run in a few times to increase the
@@ -1562,7 +1573,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
igt_spin_t *spin;
/* Allocate our spin batch and idle it. */
- spin = igt_spin_new(gem_fd, .engine = e2ring(gem_fd, e));
+ spin = igt_spin_new(gem_fd, .engine = e->flags);
igt_spin_end(spin);
gem_sync(gem_fd, spin->handle);
@@ -1666,7 +1677,7 @@ igt_main
I915_PMU_LAST - __I915_PMU_OTHER(0) + 1;
unsigned int num_engines = 0;
int fd = -1;
- const struct intel_execution_engine2 *e;
+ struct intel_execution_engine2 *e;
unsigned int i;
igt_fixture {
@@ -1675,7 +1686,7 @@ igt_main
igt_require_gem(fd);
igt_require(i915_type_id() > 0);
- for_each_engine_class_instance(fd, e)
+ __for_each_physical_engine(fd, e)
num_engines++;
}
@@ -1685,7 +1696,7 @@ igt_main
igt_subtest("invalid-init")
invalid_init();
- __for_each_engine_class_instance(e) {
+ __for_each_physical_engine(fd, e) {
const unsigned int pct[] = { 2, 50, 98 };
/**
@@ -1703,7 +1714,7 @@ igt_main
igt_subtest_group {
igt_fixture {
- gem_require_engine(fd, e->class, e->instance);
+ gem_context_has_engine(fd, 0, e->flags);
}
/**
@@ -1889,12 +1900,11 @@ igt_main
gem_quiescent_gpu(fd);
}
- __for_each_engine_class_instance(e) {
+ __for_each_physical_engine(render_fd, e) {
igt_subtest_group {
igt_fixture {
- gem_require_engine(render_fd,
- e->class,
- e->instance);
+ gem_context_has_engine(render_fd,
+ 0, e->flags);
}
igt_subtest_f("render-node-busy-%s", e->name)
diff --git a/tests/prime_mmap_coherency.c b/tests/prime_mmap_coherency.c
index 04b15ddd..39538767 100644
--- a/tests/prime_mmap_coherency.c
+++ b/tests/prime_mmap_coherency.c
@@ -279,10 +279,8 @@ static void test_ioctl_errors(void)
}
}
-int main(int argc, char **argv)
+igt_main
{
- igt_subtest_init(argc, argv);
-
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
@@ -321,6 +319,4 @@ int main(int argc, char **argv)
close(fd);
}
-
- igt_exit();
}
diff --git a/tests/prime_mmap_kms.c b/tests/prime_mmap_kms.c
index fdc37214..1c963e01 100644
--- a/tests/prime_mmap_kms.c
+++ b/tests/prime_mmap_kms.c
@@ -258,6 +258,4 @@ igt_main
igt_display_fini(&gpu.display);
close(gpu.drm_fd);
}
-
- igt_exit();
}
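Note: both prime_mmap tests above drop their hand-rolled main() in favour of the igt_main block, which supplies the argument parsing and the final igt_exit() call itself. A stripped-down sketch of the resulting shape (the "sanity" subtest name is invented for illustration, not taken from either test):

#include <unistd.h>
#include "igt.h"

static int fd = -1;

igt_main
{
	igt_fixture
		fd = drm_open_driver(DRIVER_INTEL);

	igt_subtest("sanity")	/* hypothetical subtest */
		igt_assert_lte(0, fd);

	igt_fixture
		close(fd);
}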
diff --git a/tests/prime_vgem.c b/tests/prime_vgem.c
index 94632452..69ae8c9b 100644
--- a/tests/prime_vgem.c
+++ b/tests/prime_vgem.c
@@ -741,16 +741,30 @@ static void flip_to_vgem(int i915, int vgem,
static void test_flip(int i915, int vgem, unsigned hang)
{
- struct vgem_bo bo[2];
+ drmModeModeInfo *mode = NULL;
uint32_t fb_id[2], handle[2], crtc_id;
+ igt_display_t display;
+ igt_output_t *output;
+ struct vgem_bo bo[2];
+ enum pipe pipe;
+
+ igt_display_require(&display, i915);
+ igt_display_require_output(&display);
+
+ for_each_pipe_with_valid_output(&display, pipe, output) {
+ mode = igt_output_get_mode(output);
+ break;
+ }
+
+ igt_assert(mode);
for (int i = 0; i < 2; i++) {
uint32_t strides[4] = {};
uint32_t offsets[4] = {};
int fd;
- bo[i].width = 1024;
- bo[i].height = 768;
+ bo[i].width = mode->hdisplay;
+ bo[i].height = mode->vdisplay;
bo[i].bpp = 32;
vgem_create(vgem, &bo[i]);
@@ -831,7 +845,7 @@ igt_main
e->exec_id == 0 ? "basic-" : "",
e->name) {
gem_require_ring(i915, e->exec_id | e->flags);
- igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
+ igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));
gem_quiescent_gpu(i915);
test_sync(i915, vgem, e->exec_id, e->flags);
@@ -843,7 +857,7 @@ igt_main
e->exec_id == 0 ? "basic-" : "",
e->name) {
gem_require_ring(i915, e->exec_id | e->flags);
- igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
+ igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));
gem_quiescent_gpu(i915);
test_busy(i915, vgem, e->exec_id, e->flags);
@@ -855,7 +869,7 @@ igt_main
e->exec_id == 0 ? "basic-" : "",
e->name) {
gem_require_ring(i915, e->exec_id | e->flags);
- igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
+ igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));
gem_quiescent_gpu(i915);
test_wait(i915, vgem, e->exec_id, e->flags);
@@ -878,7 +892,7 @@ igt_main
e->exec_id == 0 ? "basic-" : "",
e->name) {
gem_require_ring(i915, e->exec_id | e->flags);
- igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
+ igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));
gem_quiescent_gpu(i915);
test_fence_wait(i915, vgem, e->exec_id, e->flags);
diff --git a/tests/testdisplay.c b/tests/testdisplay.c
index 67d1b68a..32590547 100644
--- a/tests/testdisplay.c
+++ b/tests/testdisplay.c
@@ -69,10 +69,10 @@
#include <stdlib.h>
#include <signal.h>
-#define SUBTEST_OPTS 1
-#define HELP_DESCRIPTION 2
-#define Yb_OPT 3
-#define Yf_OPT 4
+enum {
+ OPT_YB,
+ OPT_YF,
+};
static int tio_fd;
struct termios saved_tio;
@@ -87,6 +87,7 @@ int do_dpms = 0; /* This aliases to DPMS_ON */
uint32_t depth = 24, stride, bpp;
int qr_code = 0;
int specified_mode_num = -1, specified_disp_id = -1;
+bool opt_dump_info = false;
drmModeModeInfo force_timing;
@@ -519,29 +520,6 @@ int update_display(bool probe)
return 1;
}
-static char optstr[] = "3hiaf:s:d:p:mrto:j:y";
-
-static void __attribute__((noreturn)) usage(char *name, char opt)
-{
- igt_info("usage: %s [-hiasdpmtf]\n", name);
- igt_info("\t-i\tdump info\n");
- igt_info("\t-a\ttest all modes\n");
- igt_info("\t-s\t<duration>\tsleep between each mode test (default: 0)\n");
- igt_info("\t-d\t<depth>\tbit depth of scanout buffer\n");
- igt_info("\t-p\t<planew,h>,<crtcx,y>,<crtcw,h> test overlay plane\n");
- igt_info("\t-m\ttest the preferred mode\n");
- igt_info("\t-3\ttest all 3D modes\n");
- igt_info("\t-t\tuse a tiled framebuffer\n");
- igt_info("\t-j\tdo dpms off, optional arg to select dpms leve (1-3)\n");
- igt_info("\t-r\tprint a QR code on the screen whose content is \"pass\" for the automatic test\n");
- igt_info("\t-o\t<id of the display>,<number of the mode>\tonly test specified mode on the specified display\n");
- igt_info("\t-f\t<clock MHz>,<hdisp>,<hsync-start>,<hsync-end>,<htotal>,\n");
- igt_info("\t\t<vdisp>,<vsync-start>,<vsync-end>,<vtotal>\n");
- igt_info("\t\ttest force mode\n");
- igt_info("\tDefault is to test all modes.\n");
- exit((opt != 'h') ? -1 : 0);
-}
-
#define dump_resource(res) if (res) dump_##res()
static void __attribute__((noreturn)) cleanup_and_exit(int ret)
@@ -564,17 +542,12 @@ static gboolean input_event(GIOChannel *source, GIOCondition condition,
return TRUE;
}
-static void enter_exec_path(const char **argv)
+static void enter_exec_path(void)
{
- char *argv0, *exec_path;
- int ret;
+ char path[PATH_MAX];
- argv0 = strdup(argv[0]);
- igt_assert(argv0);
- exec_path = dirname(argv0);
- ret = chdir(exec_path);
- igt_assert_eq(ret, 0);
- free(argv0);
+ if (readlink("/proc/self/exe", path, sizeof(path)) > 0)
+ chdir(dirname(path));
}
static void restore_termio_mode(int sig)
@@ -600,101 +573,108 @@ static void set_termio_mode(void)
tcsetattr(tio_fd, TCSANOW, &tio);
}
-int main(int argc, char **argv)
+static char optstr[] = "3iaf:s:d:p:mrto:j:y";
+static struct option long_opts[] = {
+ {"yb", 0, 0, OPT_YB},
+ {"yf", 0, 0, OPT_YF},
+ { 0, 0, 0, 0 }
+};
+
+static const char *help_str =
+ " -i\tdump info\n"
+ " -a\ttest all modes\n"
+ " -s\t<duration>\tsleep between each mode test (default: 0)\n"
+ " -d\t<depth>\tbit depth of scanout buffer\n"
+ " -p\t<planew,h>,<crtcx,y>,<crtcw,h> test overlay plane\n"
+ " -m\ttest the preferred mode\n"
+ " -3\ttest all 3D modes\n"
+ " -t\tuse an X-tiled framebuffer\n"
+ " -y, --yb\n"
+ " \tuse a Y-tiled framebuffer\n"
+ " --yf\tuse a Yf-tiled framebuffer\n"
+ " -j\tdo dpms off, optional arg to select dpms level (1-3)\n"
+ " -r\tprint a QR code on the screen whose content is \"pass\" for the automatic test\n"
+ " -o\t<id of the display>,<number of the mode>\tonly test specified mode on the specified display\n"
+ " -f\t<clock MHz>,<hdisp>,<hsync-start>,<hsync-end>,<htotal>,\n"
+ " \t<vdisp>,<vsync-start>,<vsync-end>,<vtotal>\n"
+ " \ttest force mode\n"
+ " \tDefault is to test all modes.\n"
+ ;
+
+static int opt_handler(int opt, int opt_index, void *data)
+{
+ float force_clock;
+
+ switch (opt) {
+ case '3':
+ test_stereo_modes = 1;
+ break;
+ case 'i':
+ opt_dump_info = true;
+ break;
+ case 'a':
+ test_all_modes = 1;
+ break;
+ case 'f':
+ force_mode = 1;
+ if (sscanf(optarg,"%f,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu",
+ &force_clock,&force_timing.hdisplay, &force_timing.hsync_start,&force_timing.hsync_end,&force_timing.htotal,
+ &force_timing.vdisplay, &force_timing.vsync_start, &force_timing.vsync_end, &force_timing.vtotal)!= 9)
+ return IGT_OPT_HANDLER_ERROR;
+ force_timing.clock = force_clock*1000;
+
+ break;
+ case 's':
+ sleep_between_modes = atoi(optarg);
+ break;
+ case 'j':
+ do_dpms = atoi(optarg);
+ if (do_dpms == 0)
+ do_dpms = DRM_MODE_DPMS_OFF;
+ break;
+ case 'd':
+ depth = atoi(optarg);
+ igt_info("using depth %d\n", depth);
+ break;
+ case 'p':
+ if (sscanf(optarg, "%d,%d,%d,%d,%d,%d", &plane_width,
+ &plane_height, &crtc_x, &crtc_y,
+ &crtc_w, &crtc_h) != 6)
+ return IGT_OPT_HANDLER_ERROR;
+ test_plane = 1;
+ break;
+ case 'm':
+ test_preferred_mode = 1;
+ break;
+ case 't':
+ tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
+ break;
+ case 'y':
+ case OPT_YB:
+ tiling = LOCAL_I915_FORMAT_MOD_Y_TILED;
+ break;
+ case OPT_YF:
+ tiling = LOCAL_I915_FORMAT_MOD_Yf_TILED;
+ break;
+ case 'r':
+ qr_code = 1;
+ break;
+ case 'o':
+ sscanf(optarg, "%d,%d", &specified_disp_id, &specified_mode_num);
+ break;
+ }
+
+ return IGT_OPT_HANDLER_SUCCESS;
+}
+
+igt_simple_main_args(optstr, long_opts, help_str, opt_handler, NULL)
{
- int c;
int ret = 0;
GIOChannel *stdinchannel;
GMainLoop *mainloop;
- float force_clock;
- bool opt_dump_info = false;
- struct option long_opts[] = {
- {"list-subtests", 0, 0, SUBTEST_OPTS},
- {"run-subtest", 1, 0, SUBTEST_OPTS},
- {"help-description", 0, 0, HELP_DESCRIPTION},
- {"help", 0, 0, 'h'},
- {"yb", 0, 0, Yb_OPT},
- {"yf", 0, 0, Yf_OPT},
- { 0, 0, 0, 0 }
- };
-
igt_skip_on_simulation();
- enter_exec_path((const char **) argv);
-
- while ((c = getopt_long(argc, argv, optstr, long_opts, NULL)) != -1) {
- switch (c) {
- case '3':
- test_stereo_modes = 1;
- break;
- case 'i':
- opt_dump_info = true;
- break;
- case 'a':
- test_all_modes = 1;
- break;
- case 'f':
- force_mode = 1;
- if(sscanf(optarg,"%f,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu",
- &force_clock,&force_timing.hdisplay, &force_timing.hsync_start,&force_timing.hsync_end,&force_timing.htotal,
- &force_timing.vdisplay, &force_timing.vsync_start, &force_timing.vsync_end, &force_timing.vtotal)!= 9)
- usage(argv[0], c);
- force_timing.clock = force_clock*1000;
-
- break;
- case 's':
- sleep_between_modes = atoi(optarg);
- break;
- case 'j':
- do_dpms = atoi(optarg);
- if (do_dpms == 0)
- do_dpms = DRM_MODE_DPMS_OFF;
- break;
- case 'd':
- depth = atoi(optarg);
- igt_info("using depth %d\n", depth);
- break;
- case 'p':
- if (sscanf(optarg, "%d,%d,%d,%d,%d,%d", &plane_width,
- &plane_height, &crtc_x, &crtc_y,
- &crtc_w, &crtc_h) != 6)
- usage(argv[0], c);
- test_plane = 1;
- break;
- case 'm':
- test_preferred_mode = 1;
- break;
- case 't':
- tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
- break;
- case 'y':
- case Yb_OPT:
- tiling = LOCAL_I915_FORMAT_MOD_Y_TILED;
- break;
- case Yf_OPT:
- tiling = LOCAL_I915_FORMAT_MOD_Yf_TILED;
- break;
- case 'r':
- qr_code = 1;
- break;
- case 'o':
- sscanf(optarg, "%d,%d", &specified_disp_id, &specified_mode_num);
- break;
- case SUBTEST_OPTS:
- /* invalid subtest options */
- exit(IGT_EXIT_INVALID);
- break;
- case HELP_DESCRIPTION:
- igt_info("Tests display functionality.");
- exit(0);
- break;
- default:
- /* fall through */
- case 'h':
- usage(argv[0], c);
- break;
- }
- }
+ enter_exec_path();
set_termio_mode();
@@ -771,6 +751,4 @@ out_close:
close(drm_fd);
igt_assert_eq(ret, 0);
-
- igt_exit();
}
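Note: the testdisplay conversion above swaps the open-coded getopt_long() loop and usage() for igt_simple_main_args(), which takes the extra short-option string, an optional long-option table, a help string and a per-option callback returning IGT_OPT_HANDLER_SUCCESS or IGT_OPT_HANDLER_ERROR. A toy sketch of that pattern with an invented "-x" flag (not part of testdisplay), kept self-contained by passing an empty long-option table:

#include <getopt.h>
#include "igt.h"

static bool opt_x;

static struct option long_opts[] = {
	{ 0, 0, 0, 0 }
};

static int opt_handler(int opt, int opt_index, void *data)
{
	switch (opt) {
	case 'x':
		opt_x = true;
		break;
	}

	return IGT_OPT_HANDLER_SUCCESS;
}

igt_simple_main_args("x", long_opts, "  -x\tenable the example flag\n",
		     opt_handler, NULL)
{
	igt_info("example flag is %s\n", opt_x ? "set" : "unset");
}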
diff --git a/tools/intel_reg.c b/tools/intel_reg.c
index 1247b70b..e517956b 100644
--- a/tools/intel_reg.c
+++ b/tools/intel_reg.c
@@ -329,9 +329,7 @@ static int register_srm(struct config *config, struct reg *reg,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.flags = gem_class_instance_to_eb_flags(fd,
- engine->class,
- engine->instance);
+ execbuf.flags = engine->flags;
if (secure)
execbuf.flags |= I915_EXEC_SECURE;
diff --git a/tools/intel_vbt_decode.c b/tools/intel_vbt_decode.c
index d80b1dae..38eccc48 100644
--- a/tools/intel_vbt_decode.c
+++ b/tools/intel_vbt_decode.c
@@ -228,7 +228,7 @@ static void dump_backlight_info(struct context *context,
const struct bdb_block *block)
{
const struct bdb_lfp_backlight_data *backlight = block->data;
- const struct bdb_lfp_backlight_data_entry *blc;
+ const struct lfp_backlight_data_entry *blc;
if (sizeof(*blc) != backlight->entry_size) {
printf("\tBacklight struct sizes don't match (expected %zu, got %u), skipping\n",
@@ -629,8 +629,8 @@ static void dump_lvds_data(struct context *context,
const uint8_t *lfp_data_ptr =
(const uint8_t *) lvds_data->data + lfp_data_size * i;
const uint8_t *timing_data = lfp_data_ptr + dvo_offset;
- const struct bdb_lvds_lfp_data_entry *lfp_data =
- (const struct bdb_lvds_lfp_data_entry *)lfp_data_ptr;
+ const struct lvds_lfp_data_entry *lfp_data =
+ (const struct lvds_lfp_data_entry *)lfp_data_ptr;
char marker;
if (i != context->panel_type && !context->dump_all_panel_types)
diff --git a/tools/intel_vbt_defs.h b/tools/intel_vbt_defs.h
index 3f5eff49..89ef14ca 100644
--- a/tools/intel_vbt_defs.h
+++ b/tools/intel_vbt_defs.h
@@ -75,65 +75,51 @@ struct bdb_header {
u16 bdb_size;
} __packed;
-/* strictly speaking, this is a "skip" block, but it has interesting info */
-struct vbios_data {
- u8 type; /* 0 == desktop, 1 == mobile */
- u8 relstage;
- u8 chipset;
- u8 lvds_present:1;
- u8 tv_present:1;
- u8 rsvd2:6; /* finish byte */
- u8 rsvd3[4];
- u8 signon[155];
- u8 copyright[61];
- u16 code_segment;
- u8 dos_boot_mode;
- u8 bandwidth_percent;
- u8 rsvd4; /* popup memory size */
- u8 resize_pci_bios;
- u8 rsvd5; /* is crt already on ddc2 */
-} __packed;
-
/*
* There are several types of BIOS data blocks (BDBs), each block has
* an ID and size in the first 3 bytes (ID in first, size in next 2).
* Known types are listed below.
*/
-#define BDB_GENERAL_FEATURES 1
-#define BDB_GENERAL_DEFINITIONS 2
-#define BDB_OLD_TOGGLE_LIST 3
-#define BDB_MODE_SUPPORT_LIST 4
-#define BDB_GENERIC_MODE_TABLE 5
-#define BDB_EXT_MMIO_REGS 6
-#define BDB_SWF_IO 7
-#define BDB_SWF_MMIO 8
-#define BDB_PSR 9
-#define BDB_MODE_REMOVAL_TABLE 10
-#define BDB_CHILD_DEVICE_TABLE 11
-#define BDB_DRIVER_FEATURES 12
-#define BDB_DRIVER_PERSISTENCE 13
-#define BDB_EXT_TABLE_PTRS 14
-#define BDB_DOT_CLOCK_OVERRIDE 15
-#define BDB_DISPLAY_SELECT 16
-/* 17 rsvd */
-#define BDB_DRIVER_ROTATION 18
-#define BDB_DISPLAY_REMOVE 19
-#define BDB_OEM_CUSTOM 20
-#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
-#define BDB_SDVO_LVDS_OPTIONS 22
-#define BDB_SDVO_PANEL_DTDS 23
-#define BDB_SDVO_LVDS_PNP_IDS 24
-#define BDB_SDVO_LVDS_POWER_SEQ 25
-#define BDB_TV_OPTIONS 26
-#define BDB_EDP 27
-#define BDB_LVDS_OPTIONS 40
-#define BDB_LVDS_LFP_DATA_PTRS 41
-#define BDB_LVDS_LFP_DATA 42
-#define BDB_LVDS_BACKLIGHT 43
-#define BDB_LVDS_POWER 44
-#define BDB_MIPI_CONFIG 52
-#define BDB_MIPI_SEQUENCE 53
-#define BDB_SKIP 254 /* VBIOS private block, ignore */
+enum bdb_block_id {
+ BDB_GENERAL_FEATURES = 1,
+ BDB_GENERAL_DEFINITIONS = 2,
+ BDB_OLD_TOGGLE_LIST = 3,
+ BDB_MODE_SUPPORT_LIST = 4,
+ BDB_GENERIC_MODE_TABLE = 5,
+ BDB_EXT_MMIO_REGS = 6,
+ BDB_SWF_IO = 7,
+ BDB_SWF_MMIO = 8,
+ BDB_PSR = 9,
+ BDB_MODE_REMOVAL_TABLE = 10,
+ BDB_CHILD_DEVICE_TABLE = 11,
+ BDB_DRIVER_FEATURES = 12,
+ BDB_DRIVER_PERSISTENCE = 13,
+ BDB_EXT_TABLE_PTRS = 14,
+ BDB_DOT_CLOCK_OVERRIDE = 15,
+ BDB_DISPLAY_SELECT = 16,
+ BDB_DRIVER_ROTATION = 18,
+ BDB_DISPLAY_REMOVE = 19,
+ BDB_OEM_CUSTOM = 20,
+ BDB_EFP_LIST = 21, /* workarounds for VGA hsync/vsync */
+ BDB_SDVO_LVDS_OPTIONS = 22,
+ BDB_SDVO_PANEL_DTDS = 23,
+ BDB_SDVO_LVDS_PNP_IDS = 24,
+ BDB_SDVO_LVDS_POWER_SEQ = 25,
+ BDB_TV_OPTIONS = 26,
+ BDB_EDP = 27,
+ BDB_LVDS_OPTIONS = 40,
+ BDB_LVDS_LFP_DATA_PTRS = 41,
+ BDB_LVDS_LFP_DATA = 42,
+ BDB_LVDS_BACKLIGHT = 43,
+ BDB_LVDS_POWER = 44,
+ BDB_MIPI_CONFIG = 52,
+ BDB_MIPI_SEQUENCE = 53,
+ BDB_SKIP = 254, /* VBIOS private block, ignore */
+};
+
+/*
+ * Block 1 - General Bit Definitions
+ */
struct bdb_general_features {
/* bits 1 */
@@ -176,6 +162,10 @@ struct bdb_general_features {
u8 rsvd11:2; /* finish byte */
} __packed;
+/*
+ * Block 2 - General Bytes Definition
+ */
+
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
@@ -299,6 +289,8 @@ struct bdb_general_features {
#define DVO_PORT_DPA 10
#define DVO_PORT_DPE 11 /* 193 */
#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_DPF 13 /* N/A */
+#define DVO_PORT_HDMIF 14 /* N/A */
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
@@ -316,8 +308,26 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_C,
DDC_BUS_DDI_D,
DDC_BUS_DDI_F,
+ ICL_DDC_BUS_DDI_A = 0x1,
+ ICL_DDC_BUS_DDI_B,
+ ICL_DDC_BUS_PORT_1 = 0x4,
+ ICL_DDC_BUS_PORT_2,
+ ICL_DDC_BUS_PORT_3,
+ ICL_DDC_BUS_PORT_4,
};
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+
+#define VBT_DP_MAX_LINK_RATE_HBR3 0
+#define VBT_DP_MAX_LINK_RATE_HBR2 1
+#define VBT_DP_MAX_LINK_RATE_HBR 2
+#define VBT_DP_MAX_LINK_RATE_LBR 3
+
/*
* The child device config, aka the display device data structure, provides a
* description of a port and its configuration on the platform.
@@ -382,7 +392,8 @@ struct child_device_config {
u8 lspcon:1; /* 192 */
u8 iboost:1; /* 196 */
u8 hpd_invert:1; /* 196 */
- u8 flag_reserved:3;
+ u8 use_vbt_vswing:1; /* 218 */
+ u8 flag_reserved:2;
u8 hdmi_support:1; /* 158 */
u8 dp_support:1; /* 158 */
u8 tmds_support:1; /* 158 */
@@ -407,7 +418,9 @@ struct child_device_config {
u16 extended_type;
u8 dvo_function;
u8 dp_usb_type_c:1; /* 195 */
- u8 flags2_reserved:7; /* 195 */
+ u8 tbt:1; /* 209 */
+ u8 flags2_reserved:2; /* 195 */
+ u8 dp_port_trace_length:4; /* 209 */
u8 dp_gpio_index; /* 195 */
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
@@ -441,194 +454,44 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* defs->child_dev_size;
*/
- uint8_t devices[0];
-} __packed;
-
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK 0x3
-
-struct bdb_lvds_options {
- u8 panel_type;
- u8 rsvd1;
- /* LVDS capabilities, stored in a dword */
- u8 pfit_mode:2;
- u8 pfit_text_mode_enhanced:1;
- u8 pfit_gfx_mode_enhanced:1;
- u8 pfit_ratio_auto:1;
- u8 pixel_dither:1;
- u8 lvds_edid:1;
- u8 rsvd2:1;
- u8 rsvd4;
- /* LVDS Panel channel bits stored here */
- u32 lvds_panel_channel_bits;
- /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
- u16 ssc_bits;
- u16 ssc_freq;
- u16 ssc_ddt;
- /* Panel color depth defined here */
- u16 panel_color_depth;
- /* LVDS panel type bits stored here */
- u32 dps_panel_type_bits;
- /* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
-} __packed;
-
-/* LFP pointer table contains entries to the struct below */
-struct bdb_lvds_lfp_data_ptr {
- u16 fp_timing_offset; /* offsets are from start of bdb */
- u8 fp_table_size;
- u16 dvo_timing_offset;
- u8 dvo_table_size;
- u16 panel_pnp_id_offset;
- u8 pnp_table_size;
-} __packed;
-
-struct bdb_lvds_lfp_data_ptrs {
- u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
- struct bdb_lvds_lfp_data_ptr ptr[16];
-} __packed;
-
-/* LFP data has 3 blocks per entry */
-struct lvds_fp_timing {
- u16 x_res;
- u16 y_res;
- u32 lvds_reg;
- u32 lvds_reg_val;
- u32 pp_on_reg;
- u32 pp_on_reg_val;
- u32 pp_off_reg;
- u32 pp_off_reg_val;
- u32 pp_cycle_reg;
- u32 pp_cycle_reg_val;
- u32 pfit_reg;
- u32 pfit_reg_val;
- u16 terminator;
+ u8 devices[0];
} __packed;
-struct lvds_dvo_timing {
- u16 clock; /**< In 10khz */
- u8 hactive_lo;
- u8 hblank_lo;
- u8 hblank_hi:4;
- u8 hactive_hi:4;
- u8 vactive_lo;
- u8 vblank_lo;
- u8 vblank_hi:4;
- u8 vactive_hi:4;
- u8 hsync_off_lo;
- u8 hsync_pulse_width_lo;
- u8 vsync_pulse_width_lo:4;
- u8 vsync_off_lo:4;
- u8 vsync_pulse_width_hi:2;
- u8 vsync_off_hi:2;
- u8 hsync_pulse_width_hi:2;
- u8 hsync_off_hi:2;
- u8 himage_lo;
- u8 vimage_lo;
- u8 vimage_hi:4;
- u8 himage_hi:4;
- u8 h_border;
- u8 v_border;
- u8 rsvd1:3;
- u8 digital:2;
- u8 vsync_positive:1;
- u8 hsync_positive:1;
- u8 non_interlaced:1;
-} __packed;
-
-struct lvds_pnp_id {
- u16 mfg_name;
- u16 product_code;
- u32 serial;
- u8 mfg_week;
- u8 mfg_year;
-} __packed;
-
-struct bdb_lvds_lfp_data_entry {
- struct lvds_fp_timing fp_timing;
- struct lvds_dvo_timing dvo_timing;
- struct lvds_pnp_id pnp_id;
-} __packed;
-
-struct bdb_lvds_lfp_data {
- struct bdb_lvds_lfp_data_entry data[16];
-} __packed;
-
-#define BDB_BACKLIGHT_TYPE_NONE 0
-#define BDB_BACKLIGHT_TYPE_PWM 2
-
-struct bdb_lfp_backlight_data_entry {
- u8 type:2;
- u8 active_low_pwm:1;
- u8 obsolete1:5;
- u16 pwm_freq_hz;
- u8 min_brightness;
- u8 obsolete2;
- u8 obsolete3;
-} __packed;
-
-struct bdb_lfp_backlight_control_method {
- u8 type:4;
- u8 controller:4;
-} __packed;
-
-struct bdb_lfp_backlight_data {
- u8 entry_size;
- struct bdb_lfp_backlight_data_entry data[16];
- u8 level[16];
- struct bdb_lfp_backlight_control_method backlight_control[16];
-} __packed;
+/*
+ * Block 9 - SRD Feature Block
+ */
-struct aimdb_header {
- char signature[16];
- char oem_device[20];
- u16 aimdb_version;
- u16 aimdb_header_size;
- u16 aimdb_size;
-} __packed;
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
-struct aimdb_block {
- u8 aimdb_id;
- u16 aimdb_size;
-} __packed;
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
-struct vch_panel_data {
- u16 fp_timing_offset;
- u8 fp_timing_size;
- u16 dvo_timing_offset;
- u8 dvo_timing_size;
- u16 text_fitting_offset;
- u8 text_fitting_size;
- u16 graphics_fitting_offset;
- u8 graphics_fitting_size;
-} __packed;
+ /* TP wake up time in multiple of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
-struct vch_bdb_22 {
- struct aimdb_block aimdb_block;
- struct vch_panel_data panels[16];
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
-struct bdb_sdvo_lvds_options {
- u8 panel_backlight;
- u8 h40_set_panel_type;
- u8 panel_type;
- u8 ssc_clk_freq;
- u16 als_low_trip;
- u16 als_high_trip;
- u8 sclalarcoeff_tab_row_num;
- u8 sclalarcoeff_tab_row_size;
- u8 coefficient[8];
- u8 panel_misc_bits_1;
- u8 panel_misc_bits_2;
- u8 panel_misc_bits_3;
- u8 panel_misc_bits_4;
+struct bdb_psr {
+ struct psr_table psr_table[16];
} __packed;
+/*
+ * Block 12 - Driver Features Data Block
+ */
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
@@ -684,6 +547,69 @@ struct bdb_driver_features {
u16 pc_feature_valid:1;
} __packed;
+/*
+ * Block 22 - SDVO LVDS General Options
+ */
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __packed;
+
+/*
+ * Block 23 - SDVO LVDS Panel DTDs
+ */
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_off_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_off_hi:2;
+ u8 hsync_pulse_width_hi:2;
+ u8 hsync_off_hi:2;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 non_interlaced:1;
+} __packed;
+
+struct bdb_sdvo_panel_dtds {
+ struct lvds_dvo_timing dtds[4];
+} __packed;
+
+/*
+ * Block 27 - eDP VBT Block
+ */
+
#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2
@@ -736,151 +662,133 @@ struct bdb_edp {
struct edp_full_link_params full_link_params[16]; /* 199 */
} __packed;
-struct psr_table {
- /* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
- u8 feature_bits_rsvd:6;
+/*
+ * Block 40 - LFP Data Block
+ */
- /* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
- u8 wait_times_rsvd:1;
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
- /* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 panel_type2; /* 212 */
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits;
+
+ u16 lcdvcc_s0_enable; /* 200 */
+ u32 rotation; /* 228 */
} __packed;
-struct bdb_psr {
- struct psr_table psr_table[16];
+/*
+ * Block 41 - LFP Data Table Pointers
+ */
+
+/* LFP pointer table contains entries to the struct below */
+struct lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct lvds_lfp_data_ptr ptr[16];
} __packed;
/*
- * Driver<->VBIOS interaction occurs through scratch bits in
- * GR18 & SWF*.
+ * Block 42 - LFP Data Tables
*/
-/* GR18 bits are set on display switch and hotkey events */
-#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
-#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
-#define GR18_HK_NONE (0x0<<3)
-#define GR18_HK_LFP_STRETCH (0x1<<3)
-#define GR18_HK_TOGGLE_DISP (0x2<<3)
-#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
-#define GR18_HK_POPUP_DISABLED (0x6<<3)
-#define GR18_HK_POPUP_ENABLED (0x7<<3)
-#define GR18_HK_PFIT (0x8<<3)
-#define GR18_HK_APM_CHANGE (0xa<<3)
-#define GR18_HK_MULTIPLE (0xc<<3)
-#define GR18_USER_INT_EN (1<<2)
-#define GR18_A0000_FLUSH_EN (1<<1)
-#define GR18_SMM_EN (1<<0)
-
-/* Set by driver, cleared by VBIOS */
-#define SWF00_YRES_SHIFT 16
-#define SWF00_XRES_SHIFT 0
-#define SWF00_RES_MASK 0xffff
-
-/* Set by VBIOS at boot time and driver at runtime */
-#define SWF01_TV2_FORMAT_SHIFT 8
-#define SWF01_TV1_FORMAT_SHIFT 0
-#define SWF01_TV_FORMAT_MASK 0xffff
-
-#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
-#define SWF10_GTT_OVERRIDE_EN (1<<28)
-#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
-#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
-#define SWF10_OLD_TOGGLE 0x0
-#define SWF10_TOGGLE_LIST_1 0x1
-#define SWF10_TOGGLE_LIST_2 0x2
-#define SWF10_TOGGLE_LIST_3 0x3
-#define SWF10_TOGGLE_LIST_4 0x4
-#define SWF10_PANNING_EN (1<<23)
-#define SWF10_DRIVER_LOADED (1<<22)
-#define SWF10_EXTENDED_DESKTOP (1<<21)
-#define SWF10_EXCLUSIVE_MODE (1<<20)
-#define SWF10_OVERLAY_EN (1<<19)
-#define SWF10_PLANEB_HOLDOFF (1<<18)
-#define SWF10_PLANEA_HOLDOFF (1<<17)
-#define SWF10_VGA_HOLDOFF (1<<16)
-#define SWF10_ACTIVE_DISP_MASK 0xffff
-#define SWF10_PIPEB_LFP2 (1<<15)
-#define SWF10_PIPEB_EFP2 (1<<14)
-#define SWF10_PIPEB_TV2 (1<<13)
-#define SWF10_PIPEB_CRT2 (1<<12)
-#define SWF10_PIPEB_LFP (1<<11)
-#define SWF10_PIPEB_EFP (1<<10)
-#define SWF10_PIPEB_TV (1<<9)
-#define SWF10_PIPEB_CRT (1<<8)
-#define SWF10_PIPEA_LFP2 (1<<7)
-#define SWF10_PIPEA_EFP2 (1<<6)
-#define SWF10_PIPEA_TV2 (1<<5)
-#define SWF10_PIPEA_CRT2 (1<<4)
-#define SWF10_PIPEA_LFP (1<<3)
-#define SWF10_PIPEA_EFP (1<<2)
-#define SWF10_PIPEA_TV (1<<1)
-#define SWF10_PIPEA_CRT (1<<0)
-
-#define SWF11_MEMORY_SIZE_SHIFT 16
-#define SWF11_SV_TEST_EN (1<<15)
-#define SWF11_IS_AGP (1<<14)
-#define SWF11_DISPLAY_HOLDOFF (1<<13)
-#define SWF11_DPMS_REDUCED (1<<12)
-#define SWF11_IS_VBE_MODE (1<<11)
-#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
-#define SWF11_DPMS_MASK 0x07
-#define SWF11_DPMS_OFF (1<<2)
-#define SWF11_DPMS_SUSPEND (1<<1)
-#define SWF11_DPMS_STANDBY (1<<0)
-#define SWF11_DPMS_ON 0
-
-#define SWF14_GFX_PFIT_EN (1<<31)
-#define SWF14_TEXT_PFIT_EN (1<<30)
-#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
-#define SWF14_POPUP_EN (1<<28)
-#define SWF14_DISPLAY_HOLDOFF (1<<27)
-#define SWF14_DISP_DETECT_EN (1<<26)
-#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
-#define SWF14_DRIVER_STATUS (1<<24)
-#define SWF14_OS_TYPE_WIN9X (1<<23)
-#define SWF14_OS_TYPE_WINNT (1<<22)
-/* 21:19 rsvd */
-#define SWF14_PM_TYPE_MASK 0x00070000
-#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
-#define SWF14_PM_ACPI (0x3 << 16)
-#define SWF14_PM_APM_12 (0x2 << 16)
-#define SWF14_PM_APM_11 (0x1 << 16)
-#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
- /* if GR18 indicates a display switch */
-#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
-#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
-#define SWF14_DS_PIPEB_TV2_EN (1<<13)
-#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
-#define SWF14_DS_PIPEB_LFP_EN (1<<11)
-#define SWF14_DS_PIPEB_EFP_EN (1<<10)
-#define SWF14_DS_PIPEB_TV_EN (1<<9)
-#define SWF14_DS_PIPEB_CRT_EN (1<<8)
-#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
-#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
-#define SWF14_DS_PIPEA_TV2_EN (1<<5)
-#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
-#define SWF14_DS_PIPEA_LFP_EN (1<<3)
-#define SWF14_DS_PIPEA_EFP_EN (1<<2)
-#define SWF14_DS_PIPEA_TV_EN (1<<1)
-#define SWF14_DS_PIPEA_CRT_EN (1<<0)
- /* if GR18 indicates a panel fitting request */
-#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
- /* if GR18 indicates an APM change request */
-#define SWF14_APM_HIBERNATE 0x4
-#define SWF14_APM_SUSPEND 0x3
-#define SWF14_APM_STANDBY 0x1
-#define SWF14_APM_RESTORE 0x0
-
-/* Block 52 contains MIPI configuration block
- * 6 * bdb_mipi_config, followed by 6 pps data block
- * block below
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __packed;
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __packed;
+
+struct lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+ struct lvds_lfp_data_entry data[16];
+} __packed;
+
+/*
+ * Block 43 - LFP Backlight Control Data Block
*/
+
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct lfp_backlight_control_method {
+ u8 type:4;
+ u8 controller:4;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct lfp_backlight_data_entry data[16];
+ u8 level[16];
+ struct lfp_backlight_control_method backlight_control[16];
+} __packed;
+
+/*
+ * Block 52 - MIPI Configuration Block
+ */
+
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
@@ -888,24 +796,13 @@ struct bdb_mipi_config {
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
} __packed;
-/* Block 53 contains MIPI sequences as needed by the panel
- * for enabling it. This block can be variable in size and
- * can be maximum of 6 blocks
+/*
+ * Block 53 - MIPI Sequence Block
*/
+
struct bdb_mipi_sequence {
u8 version;
- u8 data[0];
+ u8 data[0]; /* up to 6 variable length blocks */
} __packed;
-enum mipi_gpio_pin_index {
- MIPI_GPIO_UNDEFINED = 0,
- MIPI_GPIO_PANEL_ENABLE,
- MIPI_GPIO_BL_ENABLE,
- MIPI_GPIO_PWM_ENABLE,
- MIPI_GPIO_RESET_N,
- MIPI_GPIO_PWR_DOWN_R,
- MIPI_GPIO_STDBY_RST_N,
- MIPI_GPIO_MAX
-};
-
#endif /* _INTEL_VBT_DEFS_H_ */
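Note: the intel_vbt_defs.h rework above groups the structures by BDB block number and turns the block IDs into enum bdb_block_id. As the header's comment states, every block starts with a one-byte ID followed by a two-byte little-endian size, so a decoder can scan for a given ID roughly as below. find_block() is a hypothetical helper written for illustration, not the lookup intel_vbt_decode.c actually uses, and the ID is passed as a plain int so the snippet stands alone:

#include <stddef.h>
#include <stdint.h>

/* 'bdb' points at the first block, i.e. just past the bdb_header;
 * 'len' is the number of bytes remaining in the BDB. */
static const uint8_t *find_block(const uint8_t *bdb, size_t len, int id)
{
	size_t i = 0;

	while (i + 3 <= len) {
		int block_id = bdb[i];
		uint16_t block_size = bdb[i + 1] | (bdb[i + 2] << 8);

		if (i + 3 + block_size > len)
			break;			/* truncated block */

		if (block_id == id)		/* e.g. BDB_LVDS_BACKLIGHT */
			return bdb + i + 3;	/* payload after the header */

		i += 3 + block_size;
	}

	return NULL;
}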
diff --git a/tools/meson.build b/tools/meson.build
index 5d00f2e3..6e72b263 100644
--- a/tools/meson.build
+++ b/tools/meson.build
@@ -12,8 +12,7 @@ foreach prog : tools_progs_noisnt
endforeach
tools_progs = [
- # FIXME we already have a libtestcase with this name as target
- #'igt_stats',
+ 'igt_stats',
'intel_audio_dump',
'intel_backlight',
'intel_bios_dumper',
@@ -93,7 +92,8 @@ install_subdir('registers', install_dir : datadir,
shared_library('intel_aubdump', 'aubdump.c',
dependencies : [ lib_igt_chipset, dlsym ],
name_prefix : '',
- install : true)
+ install : true,
+ soversion : '0')
executable('intel_gpu_top', 'intel_gpu_top.c',
install : true,