author    Ben Hutchings <ben@decadent.org.uk>  2019-11-10 03:02:32 +0000
committer Ben Hutchings <ben@decadent.org.uk>  2019-11-10 03:02:32 +0000
commit    49c95b56da9685e43a6c6e53c32eef0fd40432f7 (patch)
tree      862641fad37d1bb8c0133695ee7d366a1dd5a09e
parent    b9ba9fabba1914083e1aa790513de3e40661e9cf (diff)
[x86] i915: Add mitigations for two hardware security flaws
-rw-r--r--  debian/changelog | 14
-rw-r--r--  debian/patches/bugfix/x86/i915/0001-drm-i915-Rename-gen7-cmdparser-tables.patch | 176
-rw-r--r--  debian/patches/bugfix/x86/i915/0002-drm-i915-Disable-Secure-Batches-for-gen6.patch | 92
-rw-r--r--  debian/patches/bugfix/x86/i915/0003-drm-i915-Remove-Master-tables-from-cmdparser.patch | 295
-rw-r--r--  debian/patches/bugfix/x86/i915/0004-drm-i915-Add-support-for-mandatory-cmdparsing.patch | 111
-rw-r--r--  debian/patches/bugfix/x86/i915/0005-drm-i915-Support-ro-ppgtt-mapped-cmdparser-shadow-bu.patch | 199
-rw-r--r--  debian/patches/bugfix/x86/i915/0006-drm-i915-Allow-parsing-of-unsized-batches.patch | 57
-rw-r--r--  debian/patches/bugfix/x86/i915/0007-drm-i915-Add-gen9-BCS-cmdparsing.patch | 248
-rw-r--r--  debian/patches/bugfix/x86/i915/0008-drm-i915-cmdparser-Use-explicit-goto-for-error-paths.patch | 94
-rw-r--r--  debian/patches/bugfix/x86/i915/0009-drm-i915-cmdparser-Add-support-for-backward-jumps.patch | 398
-rw-r--r--  debian/patches/bugfix/x86/i915/0010-drm-i915-cmdparser-Ignore-Length-operands-during-com.patch | 37
-rw-r--r--  debian/patches/bugfix/x86/i915/0011-drm-i915-Lower-RM-timeout-to-avoid-DSI-hard-hangs.patch | 72
-rw-r--r--  debian/patches/bugfix/x86/i915/0012-drm-i915-gen8-Add-RC6-CTX-corruption-WA.patch | 284
-rw-r--r--  debian/patches/series | 12
14 files changed, 2089 insertions, 0 deletions
diff --git a/debian/changelog b/debian/changelog
index dc589a8e660a..dc9728d4b9f2 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -23,6 +23,20 @@ linux (5.3.9-2) UNRELEASED; urgency=medium
- kvm: Add helper function for creating VM worker threads
- kvm: x86: mmu: Recovery of shattered NX large pages
- Documentation: Add ITLB_MULTIHIT documentation
+ * [x86] i915: Mitigate local privilege escalation on gen9 (CVE-2019-0155):
+ - drm/i915: Rename gen7 cmdparser tables
+ - drm/i915: Disable Secure Batches for gen6+
+ - drm/i915: Remove Master tables from cmdparser
+ - drm/i915: Add support for mandatory cmdparsing
+ - drm/i915: Support ro ppgtt mapped cmdparser shadow buffers
+ - drm/i915: Allow parsing of unsized batches
+ - drm/i915: Add gen9 BCS cmdparsing
+ - drm/i915/cmdparser: Use explicit goto for error paths
+ - drm/i915/cmdparser: Add support for backward jumps
+ - drm/i915/cmdparser: Ignore Length operands during command matching
+ * [x86] i915: Mitigate local denial-of-service on gen8/gen9 (CVE-2019-0154):
+ - drm/i915: Lower RM timeout to avoid DSI hard hangs
+ - drm/i915/gen8+: Add RC6 CTX corruption WA
-- Ben Hutchings <ben@decadent.org.uk> Sat, 09 Nov 2019 18:53:39 +0000
diff --git a/debian/patches/bugfix/x86/i915/0001-drm-i915-Rename-gen7-cmdparser-tables.patch b/debian/patches/bugfix/x86/i915/0001-drm-i915-Rename-gen7-cmdparser-tables.patch
new file mode 100644
index 000000000000..4f36beb6ab25
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0001-drm-i915-Rename-gen7-cmdparser-tables.patch
@@ -0,0 +1,176 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Fri, 20 Apr 2018 14:26:01 -0700
+Subject: drm/i915: Rename gen7 cmdparser tables
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 0a2f661b6c21815a7fa60e30babe975fee8e73c6 upstream.
+
+We're about to introduce some new tables for later gens, and the
+current naming for the gen7 tables will no longer make sense.
+
+v2: rebase
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 70 +++++++++++++-------------
+ 1 file changed, 35 insertions(+), 35 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -212,7 +212,7 @@ struct drm_i915_cmd_table {
+
+ /* Command Mask Fixed Len Action
+ ---------------------------------------------------------- */
+-static const struct drm_i915_cmd_descriptor common_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+@@ -245,7 +245,7 @@ static const struct drm_i915_cmd_descrip
+ CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
+ };
+
+-static const struct drm_i915_cmd_descriptor render_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_PREDICATE, SMI, F, 1, S ),
+@@ -329,7 +329,7 @@ static const struct drm_i915_cmd_descrip
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
+ };
+
+-static const struct drm_i915_cmd_descriptor video_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+@@ -373,7 +373,7 @@ static const struct drm_i915_cmd_descrip
+ CMD( MFX_WAIT, SMFX, F, 1, S ),
+ };
+
+-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+@@ -411,7 +411,7 @@ static const struct drm_i915_cmd_descrip
+ }}, ),
+ };
+
+-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
+ CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
+ .bits = {{
+@@ -464,35 +464,35 @@ static const struct drm_i915_cmd_descrip
+ #undef B
+ #undef M
+
+-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
+- { common_cmds, ARRAY_SIZE(common_cmds) },
+- { render_cmds, ARRAY_SIZE(render_cmds) },
++static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
++ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
+ };
+
+-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
+- { common_cmds, ARRAY_SIZE(common_cmds) },
+- { render_cmds, ARRAY_SIZE(render_cmds) },
++static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
++ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
+ { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
+ };
+
+-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
+- { common_cmds, ARRAY_SIZE(common_cmds) },
+- { video_cmds, ARRAY_SIZE(video_cmds) },
++static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
++ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++ { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
+ };
+
+-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
+- { common_cmds, ARRAY_SIZE(common_cmds) },
+- { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
++static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
++ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++ { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
+ };
+
+-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
+- { common_cmds, ARRAY_SIZE(common_cmds) },
+- { blt_cmds, ARRAY_SIZE(blt_cmds) },
++static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
++ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
+ };
+
+-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
+- { common_cmds, ARRAY_SIZE(common_cmds) },
+- { blt_cmds, ARRAY_SIZE(blt_cmds) },
++static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
++ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
+ { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+ };
+
+@@ -872,12 +872,12 @@ void intel_engine_init_cmd_parser(struct
+ switch (engine->class) {
+ case RENDER_CLASS:
+ if (IS_HASWELL(engine->i915)) {
+- cmd_tables = hsw_render_ring_cmds;
++ cmd_tables = hsw_render_ring_cmd_table;
+ cmd_table_count =
+- ARRAY_SIZE(hsw_render_ring_cmds);
++ ARRAY_SIZE(hsw_render_ring_cmd_table);
+ } else {
+- cmd_tables = gen7_render_cmds;
+- cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
++ cmd_tables = gen7_render_cmd_table;
++ cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
+ }
+
+ if (IS_HASWELL(engine->i915)) {
+@@ -891,17 +891,17 @@ void intel_engine_init_cmd_parser(struct
+ engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+ break;
+ case VIDEO_DECODE_CLASS:
+- cmd_tables = gen7_video_cmds;
+- cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
++ cmd_tables = gen7_video_cmd_table;
++ cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
+ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ case COPY_ENGINE_CLASS:
+ if (IS_HASWELL(engine->i915)) {
+- cmd_tables = hsw_blt_ring_cmds;
+- cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
++ cmd_tables = hsw_blt_ring_cmd_table;
++ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
+ } else {
+- cmd_tables = gen7_blt_cmds;
+- cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
++ cmd_tables = gen7_blt_cmd_table;
++ cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
+ }
+
+ if (IS_HASWELL(engine->i915)) {
+@@ -915,8 +915,8 @@ void intel_engine_init_cmd_parser(struct
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+- cmd_tables = hsw_vebox_cmds;
+- cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
++ cmd_tables = hsw_vebox_cmd_table;
++ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
+ /* VECS can use the same length_mask function as VCS */
+ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
diff --git a/debian/patches/bugfix/x86/i915/0002-drm-i915-Disable-Secure-Batches-for-gen6.patch b/debian/patches/bugfix/x86/i915/0002-drm-i915-Disable-Secure-Batches-for-gen6.patch
new file mode 100644
index 000000000000..3292f94b09fd
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0002-drm-i915-Disable-Secure-Batches-for-gen6.patch
@@ -0,0 +1,92 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Fri, 8 Jun 2018 08:53:46 -0700
+Subject: drm/i915: Disable Secure Batches for gen6+
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 44157641d448cbc0c4b73c5231d2b911f0cb0427 upstream.
+
+Retroactively stop reporting support for secure batches
+through the api for gen6+ so that older binaries trigger
+the fallback path instead.
+
+Older binaries use secure batches pre gen6 to access resources
+that are not available to normal usermode processes. However,
+all known userspace explicitly checks for HAS_SECURE_BATCHES
+before relying on the secure batch feature.
+
+Since there are no known binaries relying on this for newer gens
+we can kill secure batches from gen6, via I915_PARAM_HAS_SECURE_BATCHES.
+
+v2: rebase (Mika)
+v3: rebase (Mika)
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 12 ++++++++++--
+ drivers/gpu/drm/i915/i915_drv.c | 2 +-
+ drivers/gpu/drm/i915/i915_drv.h | 1 +
+ 3 files changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2351,6 +2351,7 @@ i915_gem_do_execbuffer(struct drm_device
+ struct drm_i915_gem_exec_object2 *exec,
+ struct drm_syncobj **fences)
+ {
++ struct drm_i915_private *i915 = to_i915(dev);
+ struct i915_execbuffer eb;
+ struct dma_fence *in_fence = NULL;
+ struct dma_fence *exec_fence = NULL;
+@@ -2362,7 +2363,7 @@ i915_gem_do_execbuffer(struct drm_device
+ BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
+ ~__EXEC_OBJECT_UNKNOWN_FLAGS);
+
+- eb.i915 = to_i915(dev);
++ eb.i915 = i915;
+ eb.file = file;
+ eb.args = args;
+ if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
+@@ -2382,8 +2383,15 @@ i915_gem_do_execbuffer(struct drm_device
+
+ eb.batch_flags = 0;
+ if (args->flags & I915_EXEC_SECURE) {
++ if (INTEL_GEN(i915) >= 11)
++ return -ENODEV;
++
++ /* Return -EPERM to trigger fallback code on old binaries. */
++ if (!HAS_SECURE_BATCHES(i915))
++ return -EPERM;
++
+ if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
+- return -EPERM;
++ return -EPERM;
+
+ eb.batch_flags |= I915_DISPATCH_SECURE;
+ }
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -387,7 +387,7 @@ static int i915_getparam_ioctl(struct dr
+ value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+- value = capable(CAP_SYS_ADMIN);
++ value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version(dev_priv);
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2249,6 +2249,7 @@ IS_SUBPLATFORM(const struct drm_i915_pri
+ #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
+ #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
+ #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
++#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
+ #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
+ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
+
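The fallback behaviour the patch above relies on is easiest to see from the userspace side. The sketch below is not part of the series; it only shows how a client might probe I915_PARAM_HAS_SECURE_BATCHES via DRM_IOCTL_I915_GETPARAM before requesting I915_EXEC_SECURE, treating a failed or zero result as "use the non-secure path". The uapi structure and parameter names are the standard i915 ones; the header path may differ between kernel headers and a libdrm install.

#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* header path may vary with libdrm */

/* Returns non-zero if secure batches may be requested on this fd. */
static int has_secure_batches(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SECURE_BATCHES,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* treat errors as "not supported" */

	/* 0 on gen6+ after this patch, or without CAP_SYS_ADMIN */
	return value;
}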
diff --git a/debian/patches/bugfix/x86/i915/0003-drm-i915-Remove-Master-tables-from-cmdparser.patch b/debian/patches/bugfix/x86/i915/0003-drm-i915-Remove-Master-tables-from-cmdparser.patch
new file mode 100644
index 000000000000..f7105385c75e
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0003-drm-i915-Remove-Master-tables-from-cmdparser.patch
@@ -0,0 +1,295 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Fri, 8 Jun 2018 10:05:26 -0700
+Subject: drm/i915: Remove Master tables from cmdparser
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 66d8aba1cd6db34af10de465c0d52af679288cb6 upstream.
+
+The previous patch has killed support for secure batches
+on gen6+, and hence the cmdparsers master tables are
+now dead code. Remove them.
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 7 +-
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 84 +++++--------------
+ drivers/gpu/drm/i915/i915_drv.h | 3 +-
+ 3 files changed, 26 insertions(+), 68 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2009,7 +2009,7 @@ static int i915_reset_gen7_sol_offsets(s
+ return 0;
+ }
+
+-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
++static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
+ {
+ struct drm_i915_gem_object *shadow_batch_obj;
+ struct i915_vma *vma;
+@@ -2024,8 +2024,7 @@ static struct i915_vma *eb_parse(struct
+ eb->batch->obj,
+ shadow_batch_obj,
+ eb->batch_start_offset,
+- eb->batch_len,
+- is_master);
++ eb->batch_len);
+ if (err) {
+ if (err == -EACCES) /* unhandled chained batch */
+ vma = NULL;
+@@ -2484,7 +2483,7 @@ i915_gem_do_execbuffer(struct drm_device
+ if (eb_use_cmdparser(&eb)) {
+ struct i915_vma *vma;
+
+- vma = eb_parse(&eb, drm_is_current_master(file));
++ vma = eb_parse(&eb);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_vma;
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -52,13 +52,11 @@
+ * granting userspace undue privileges. There are three categories of privilege.
+ *
+ * First, commands which are explicitly defined as privileged or which should
+- * only be used by the kernel driver. The parser generally rejects such
+- * commands, though it may allow some from the drm master process.
++ * only be used by the kernel driver. The parser rejects such commands
+ *
+ * Second, commands which access registers. To support correct/enhanced
+ * userspace functionality, particularly certain OpenGL extensions, the parser
+- * provides a whitelist of registers which userspace may safely access (for both
+- * normal and drm master processes).
++ * provides a whitelist of registers which userspace may safely access
+ *
+ * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
+ * The parser always rejects such commands.
+@@ -83,9 +81,9 @@
+ * in the per-engine command tables.
+ *
+ * Other command table entries map fairly directly to high level categories
+- * mentioned above: rejected, master-only, register whitelist. The parser
+- * implements a number of checks, including the privileged memory checks, via a
+- * general bitmasking mechanism.
++ * mentioned above: rejected, register whitelist. The parser implements a number
++ * of checks, including the privileged memory checks, via a general bitmasking
++ * mechanism.
+ */
+
+ /*
+@@ -103,8 +101,6 @@ struct drm_i915_cmd_descriptor {
+ * CMD_DESC_REJECT: The command is never allowed
+ * CMD_DESC_REGISTER: The command should be checked against the
+ * register whitelist for the appropriate ring
+- * CMD_DESC_MASTER: The command is allowed if the submitting process
+- * is the DRM master
+ */
+ u32 flags;
+ #define CMD_DESC_FIXED (1<<0)
+@@ -112,7 +108,6 @@ struct drm_i915_cmd_descriptor {
+ #define CMD_DESC_REJECT (1<<2)
+ #define CMD_DESC_REGISTER (1<<3)
+ #define CMD_DESC_BITMASK (1<<4)
+-#define CMD_DESC_MASTER (1<<5)
+
+ /*
+ * The command's unique identification bits and the bitmask to get them.
+@@ -208,14 +203,13 @@ struct drm_i915_cmd_table {
+ #define R CMD_DESC_REJECT
+ #define W CMD_DESC_REGISTER
+ #define B CMD_DESC_BITMASK
+-#define M CMD_DESC_MASTER
+
+ /* Command Mask Fixed Len Action
+ ---------------------------------------------------------- */
+ static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
+- CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
++ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+@@ -312,7 +306,7 @@ static const struct drm_i915_cmd_descrip
+ CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
+ CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
+- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
++ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+@@ -445,7 +439,7 @@ static const struct drm_i915_cmd_descrip
+ };
+
+ static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
+- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
++ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+ };
+
+@@ -462,7 +456,6 @@ static const struct drm_i915_cmd_descrip
+ #undef R
+ #undef W
+ #undef B
+-#undef M
+
+ static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+@@ -611,47 +604,29 @@ static const struct drm_i915_reg_descrip
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ };
+
+-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
+- REG32(FORCEWAKE_MT),
+- REG32(DERRMR),
+- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
+- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
+- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
+-};
+-
+-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
+- REG32(FORCEWAKE_MT),
+- REG32(DERRMR),
+-};
+-
+ #undef REG64
+ #undef REG32
+
+ struct drm_i915_reg_table {
+ const struct drm_i915_reg_descriptor *regs;
+ int num_regs;
+- bool master;
+ };
+
+ static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
+- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
++ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+ };
+
+ static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
+- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
++ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+ };
+
+ static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
+- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+- { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
+- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
++ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
++ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
+ };
+
+ static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
+- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
++ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+ };
+
+ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+@@ -1028,22 +1003,16 @@ __find_reg(const struct drm_i915_reg_des
+ }
+
+ static const struct drm_i915_reg_descriptor *
+-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
++find_reg(const struct intel_engine_cs *engine, u32 addr)
+ {
+ const struct drm_i915_reg_table *table = engine->reg_tables;
++ const struct drm_i915_reg_descriptor *reg = NULL;
+ int count = engine->reg_table_count;
+
+- for (; count > 0; ++table, --count) {
+- if (!table->master || is_master) {
+- const struct drm_i915_reg_descriptor *reg;
+-
+- reg = __find_reg(table->regs, table->num_regs, addr);
+- if (reg != NULL)
+- return reg;
+- }
+- }
++ for (; !reg && (count > 0); ++table, --count)
++ reg = __find_reg(table->regs, table->num_regs, addr);
+
+- return NULL;
++ return reg;
+ }
+
+ /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
+@@ -1127,8 +1096,7 @@ static u32 *copy_batch(struct drm_i915_g
+
+ static bool check_cmd(const struct intel_engine_cs *engine,
+ const struct drm_i915_cmd_descriptor *desc,
+- const u32 *cmd, u32 length,
+- const bool is_master)
++ const u32 *cmd, u32 length)
+ {
+ if (desc->flags & CMD_DESC_SKIP)
+ return true;
+@@ -1138,12 +1106,6 @@ static bool check_cmd(const struct intel
+ return false;
+ }
+
+- if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+- DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+- *cmd);
+- return false;
+- }
+-
+ if (desc->flags & CMD_DESC_REGISTER) {
+ /*
+ * Get the distance between individual register offset
+@@ -1157,7 +1119,7 @@ static bool check_cmd(const struct intel
+ offset += step) {
+ const u32 reg_addr = cmd[offset] & desc->reg.mask;
+ const struct drm_i915_reg_descriptor *reg =
+- find_reg(engine, is_master, reg_addr);
++ find_reg(engine, reg_addr);
+
+ if (!reg) {
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
+@@ -1244,7 +1206,6 @@ static bool check_cmd(const struct intel
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
+- * @is_master: is the submitting process the drm master?
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+@@ -1256,8 +1217,7 @@ int intel_engine_cmd_parser(struct intel
+ struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u32 batch_start_offset,
+- u32 batch_len,
+- bool is_master)
++ u32 batch_len)
+ {
+ u32 *cmd, *batch_end;
+ struct drm_i915_cmd_descriptor default_desc = noop_desc;
+@@ -1323,7 +1283,7 @@ int intel_engine_cmd_parser(struct intel
+ break;
+ }
+
+- if (!check_cmd(engine, desc, cmd, length, is_master)) {
++ if (!check_cmd(engine, desc, cmd, length)) {
+ ret = -EACCES;
+ break;
+ }
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2717,8 +2717,7 @@ int intel_engine_cmd_parser(struct intel
+ struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u32 batch_start_offset,
+- u32 batch_len,
+- bool is_master);
++ u32 batch_len);
+
+ /* i915_perf.c */
+ extern void i915_perf_init(struct drm_i915_private *dev_priv);
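With the master-only tables gone, the whitelist lookup collapses to an unconditional walk over each engine's register tables. The fragment below is a simplified standalone sketch of that shape, not the driver code: the struct and function names are invented, and the real __find_reg() searches sorted tables rather than scanning linearly. It is only meant to show that every table is now consulted regardless of who submitted the batch.

#include <stddef.h>

/* Illustrative types; the driver uses drm_i915_reg_descriptor/reg_table. */
struct reg_desc  { unsigned int addr; };
struct reg_table { const struct reg_desc *regs; int num_regs; };

static const struct reg_desc *
lookup_whitelisted_reg(const struct reg_table *table, int count,
		       unsigned int addr)
{
	/* Linear scan for clarity; the driver searches sorted tables. */
	for (; count > 0; table++, count--) {
		int i;

		for (i = 0; i < table->num_regs; i++)
			if (table->regs[i].addr == addr)
				return &table->regs[i];
	}

	return NULL;	/* not whitelisted: check_cmd() rejects the access */
}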
diff --git a/debian/patches/bugfix/x86/i915/0004-drm-i915-Add-support-for-mandatory-cmdparsing.patch b/debian/patches/bugfix/x86/i915/0004-drm-i915-Add-support-for-mandatory-cmdparsing.patch
new file mode 100644
index 000000000000..a5980e42766f
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0004-drm-i915-Add-support-for-mandatory-cmdparsing.patch
@@ -0,0 +1,111 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Wed, 1 Aug 2018 09:33:59 -0700
+Subject: drm/i915: Add support for mandatory cmdparsing
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 311a50e76a33d1e029563c24b2ff6db0c02b5afe upstream.
+
+The existing cmdparser for gen7 can be bypassed by specifying
+batch_len=0 in the execbuf call. This is safe because bypassing
+simply reduces the cmd-set available.
+
+In a later patch we will introduce cmdparsing for gen9, as a
+security measure, which must be strictly enforced since without
+it we are vulnerable to DoS attacks.
+
+Introduce the concept of 'required' cmd parsing that cannot be
+bypassed by submitting zero-length bb's.
+
+v2: rebase (Mika)
+v2: rebase (Mika)
+v3: fix conflict on engine flags (Mika)
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 3 ++-
+ drivers/gpu/drm/i915/gt/intel_engine_types.h | 13 ++++++++++---
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 6 +++---
+ 3 files changed, 15 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -295,7 +295,8 @@ static inline u64 gen8_noncanonical_addr
+
+ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
+ {
+- return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
++ return intel_engine_requires_cmd_parser(eb->engine) ||
++ (intel_engine_using_cmd_parser(eb->engine) && eb->batch_len);
+ }
+
+ static int eb_create(struct i915_execbuffer *eb)
+--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+@@ -460,12 +460,13 @@ struct intel_engine_cs {
+
+ struct intel_engine_hangcheck hangcheck;
+
+-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
++#define I915_ENGINE_USING_CMD_PARSER BIT(0)
+ #define I915_ENGINE_SUPPORTS_STATS BIT(1)
+ #define I915_ENGINE_HAS_PREEMPTION BIT(2)
+ #define I915_ENGINE_HAS_SEMAPHORES BIT(3)
+ #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
+ #define I915_ENGINE_IS_VIRTUAL BIT(5)
++#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
+ unsigned int flags;
+
+ /*
+@@ -526,9 +527,15 @@ struct intel_engine_cs {
+ };
+
+ static inline bool
+-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
++intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
+ {
+- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
++ return engine->flags & I915_ENGINE_USING_CMD_PARSER;
++}
++
++static inline bool
++intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
++{
++ return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
+ }
+
+ static inline bool
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -917,7 +917,7 @@ void intel_engine_init_cmd_parser(struct
+ return;
+ }
+
+- engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
++ engine->flags |= I915_ENGINE_USING_CMD_PARSER;
+ }
+
+ /**
+@@ -929,7 +929,7 @@ void intel_engine_init_cmd_parser(struct
+ */
+ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
+ {
+- if (!intel_engine_needs_cmd_parser(engine))
++ if (!intel_engine_using_cmd_parser(engine))
+ return;
+
+ fini_hash_table(engine);
+@@ -1317,7 +1317,7 @@ int i915_cmd_parser_get_version(struct d
+
+ /* If the command parser is not enabled, report 0 - unsupported */
+ for_each_engine(engine, dev_priv, id) {
+- if (intel_engine_needs_cmd_parser(engine)) {
++ if (intel_engine_using_cmd_parser(engine)) {
+ active = true;
+ break;
+ }
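The split between "using" and "requiring" the parser boils down to a small predicate at submission time. The sketch below restates it with made-up flag names (the driver's are I915_ENGINE_USING_CMD_PARSER and I915_ENGINE_REQUIRES_CMD_PARSER): mandatory parsing can no longer be skipped by passing a zero batch length.

#define ENGINE_USING_CMD_PARSER    (1u << 0)	/* optional, gen7-style */
#define ENGINE_REQUIRES_CMD_PARSER (1u << 1)	/* mandatory, cannot be bypassed */

static int should_parse_batch(unsigned int engine_flags,
			      unsigned long long batch_len)
{
	if (engine_flags & ENGINE_REQUIRES_CMD_PARSER)
		return 1;				/* always scanned */

	return (engine_flags & ENGINE_USING_CMD_PARSER) && batch_len != 0;
}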
diff --git a/debian/patches/bugfix/x86/i915/0005-drm-i915-Support-ro-ppgtt-mapped-cmdparser-shadow-bu.patch b/debian/patches/bugfix/x86/i915/0005-drm-i915-Support-ro-ppgtt-mapped-cmdparser-shadow-bu.patch
new file mode 100644
index 000000000000..c094dc8bd667
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0005-drm-i915-Support-ro-ppgtt-mapped-cmdparser-shadow-bu.patch
@@ -0,0 +1,199 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Tue, 22 May 2018 13:59:06 -0700
+Subject: drm/i915: Support ro ppgtt mapped cmdparser shadow buffers
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 4f7af1948abcb18b4772fe1bcd84d7d27d96258c upstream.
+
+For Gen7, the original cmdparser motive was to permit limited
+use of register read/write instructions in unprivileged BB's.
+This worked by copying the user supplied bb to a kmd owned
+bb, and running it in secure mode, from the ggtt, only if
+the scanner finds no unsafe commands or registers.
+
+For Gen8+ we can't use this same technique because running bb's
+from the ggtt also disables access to ppgtt space. But we also
+do not actually require 'secure' execution since we are only
+trying to reduce the available command/register set. Instead we
+will copy the user buffer to a kmd owned read-only bb in ppgtt,
+and run in the usual non-secure mode.
+
+Note that ro pages are only supported by ppgtt (not ggtt), but
+luckily that's exactly what we need.
+
+Add the required paths to map the shadow buffer to ppgtt ro for Gen8+
+
+v2: IS_GEN7/IS_GEN (Mika)
+v3: rebase
+v4: rebase
+v5: rebase
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 58 +++++++++++++------
+ drivers/gpu/drm/i915/i915_drv.h | 14 +++++
+ drivers/gpu/drm/i915/i915_gem.c | 16 ++++-
+ 3 files changed, 69 insertions(+), 19 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2010,6 +2010,34 @@ static int i915_reset_gen7_sol_offsets(s
+ return 0;
+ }
+
++static struct i915_vma *
++shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
++{
++ struct drm_i915_private *dev_priv = eb->i915;
++ struct i915_vma * const vma = *eb->vma;
++ struct i915_address_space *vm;
++ u64 flags;
++
++ /*
++ * PPGTT backed shadow buffers must be mapped RO, to prevent
++ * post-scan tampering
++ */
++ if (CMDPARSER_USES_GGTT(dev_priv)) {
++ flags = PIN_GLOBAL;
++ vm = &dev_priv->ggtt.vm;
++ eb->batch_flags |= I915_DISPATCH_SECURE;
++ } else if (vma->vm->has_read_only) {
++ flags = PIN_USER;
++ vm = vma->vm;
++ i915_gem_object_set_readonly(obj);
++ } else {
++ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
++ return ERR_PTR(-EINVAL);
++ }
++
++ return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
++}
++
+ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
+ {
+ struct drm_i915_gem_object *shadow_batch_obj;
+@@ -2027,14 +2055,21 @@ static struct i915_vma *eb_parse(struct
+ eb->batch_start_offset,
+ eb->batch_len);
+ if (err) {
+- if (err == -EACCES) /* unhandled chained batch */
++ /*
++ * Unsafe GGTT-backed buffers can still be submitted safely
++ * as non-secure.
++ * For PPGTT backing however, we have no choice but to forcibly
++ * reject unsafe buffers
++ */
++ if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
++ /* Execute original buffer non-secure */
+ vma = NULL;
+ else
+ vma = ERR_PTR(err);
+ goto out;
+ }
+
+- vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
++ vma = shadow_batch_pin(eb, shadow_batch_obj);
+ if (IS_ERR(vma))
+ goto out;
+
+@@ -2043,7 +2078,9 @@ static struct i915_vma *eb_parse(struct
+ __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
+ vma->exec_flags = &eb->flags[eb->buffer_count];
+ eb->buffer_count++;
+-
++ eb->batch_start_offset = 0;
++ eb->batch = vma;
++ /* eb->batch_len unchanged */
+ out:
+ i915_gem_object_unpin_pages(shadow_batch_obj);
+ return vma;
+@@ -2489,21 +2526,6 @@ i915_gem_do_execbuffer(struct drm_device
+ err = PTR_ERR(vma);
+ goto err_vma;
+ }
+-
+- if (vma) {
+- /*
+- * Batch parsed and accepted:
+- *
+- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+- * bit from MI_BATCH_BUFFER_START commands issued in
+- * the dispatch_execbuffer implementations. We
+- * specifically don't want that set on batches the
+- * command parser has accepted.
+- */
+- eb.batch_flags |= I915_DISPATCH_SECURE;
+- eb.batch_start_offset = 0;
+- eb.batch = vma;
+- }
+ }
+
+ if (eb.batch_len == 0)
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2246,6 +2246,12 @@ IS_SUBPLATFORM(const struct drm_i915_pri
+ #define VEBOX_MASK(dev_priv) \
+ ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+
++/*
++ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
++ * All later gens can run the final buffer from the ppgtt
++ */
++#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
++
+ #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
+ #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
+ #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
+@@ -2529,6 +2535,14 @@ i915_gem_object_ggtt_pin(struct drm_i915
+
+ int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+
++struct i915_vma * __must_check
++i915_gem_object_pin(struct drm_i915_gem_object *obj,
++ struct i915_address_space *vm,
++ const struct i915_ggtt_view *view,
++ u64 size,
++ u64 alignment,
++ u64 flags);
++
+ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
+
+ static inline int __must_check
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1025,6 +1025,20 @@ i915_gem_object_ggtt_pin(struct drm_i915
+ {
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
++
++ return i915_gem_object_pin(obj, vm, view, size, alignment,
++ flags | PIN_GLOBAL);
++}
++
++struct i915_vma *
++i915_gem_object_pin(struct drm_i915_gem_object *obj,
++ struct i915_address_space *vm,
++ const struct i915_ggtt_view *view,
++ u64 size,
++ u64 alignment,
++ u64 flags)
++{
++ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+ int ret;
+
+@@ -1091,7 +1105,7 @@ i915_gem_object_ggtt_pin(struct drm_i915
+ return ERR_PTR(ret);
+ }
+
+- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
++ ret = i915_vma_pin(vma, size, alignment, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
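The placement rule this patch introduces can be summarised as a three-way decision. The sketch below is illustrative only, with an invented enum and helper (the driver works on i915_vma and i915_address_space objects): gen7 keeps the GGTT-plus-secure-dispatch scheme, later gens map the shadow copy read-only in the ppgtt and dispatch it non-secure, and a vm without read-only support means the scanned submission has to be refused.

enum shadow_placement {
	SHADOW_GGTT_SECURE,	/* gen7: copy to ggtt, set I915_DISPATCH_SECURE */
	SHADOW_PPGTT_RO,	/* gen8+: read-only ppgtt mapping, non-secure */
	SHADOW_REJECT,		/* cannot prevent post-scan tampering */
};

static enum shadow_placement
choose_shadow_placement(int cmdparser_uses_ggtt, int vm_has_read_only)
{
	if (cmdparser_uses_ggtt)
		return SHADOW_GGTT_SECURE;
	if (vm_has_read_only)
		return SHADOW_PPGTT_RO;
	return SHADOW_REJECT;
}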
diff --git a/debian/patches/bugfix/x86/i915/0006-drm-i915-Allow-parsing-of-unsized-batches.patch b/debian/patches/bugfix/x86/i915/0006-drm-i915-Allow-parsing-of-unsized-batches.patch
new file mode 100644
index 000000000000..0b5018f05e88
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0006-drm-i915-Allow-parsing-of-unsized-batches.patch
@@ -0,0 +1,57 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Wed, 1 Aug 2018 09:45:50 -0700
+Subject: drm/i915: Allow parsing of unsized batches
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 435e8fc059dbe0eec823a75c22da2972390ba9e0 upstream.
+
+In "drm/i915: Add support for mandatory cmdparsing" we introduced the
+concept of mandatory parsing. This allows the cmdparser to be invoked
+even when user passes batch_len=0 to the execbuf ioctl's.
+
+However, the cmdparser needs to know the extents of the buffer being
+scanned. Refactor the code to ensure the cmdparser uses the actual
+object size, instead of the incoming length, if user passes 0.
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -296,7 +296,8 @@ static inline u64 gen8_noncanonical_addr
+ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
+ {
+ return intel_engine_requires_cmd_parser(eb->engine) ||
+- (intel_engine_using_cmd_parser(eb->engine) && eb->batch_len);
++ (intel_engine_using_cmd_parser(eb->engine) &&
++ eb->args->batch_len);
+ }
+
+ static int eb_create(struct i915_execbuffer *eb)
+@@ -2518,6 +2519,9 @@ i915_gem_do_execbuffer(struct drm_device
+ goto err_vma;
+ }
+
++ if (eb.batch_len == 0)
++ eb.batch_len = eb.batch->size - eb.batch_start_offset;
++
+ if (eb_use_cmdparser(&eb)) {
+ struct i915_vma *vma;
+
+@@ -2528,9 +2532,6 @@ i915_gem_do_execbuffer(struct drm_device
+ }
+ }
+
+- if (eb.batch_len == 0)
+- eb.batch_len = eb.batch->size - eb.batch_start_offset;
+-
+ /*
+ * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
diff --git a/debian/patches/bugfix/x86/i915/0007-drm-i915-Add-gen9-BCS-cmdparsing.patch b/debian/patches/bugfix/x86/i915/0007-drm-i915-Add-gen9-BCS-cmdparsing.patch
new file mode 100644
index 000000000000..4685eadec38e
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0007-drm-i915-Add-gen9-BCS-cmdparsing.patch
@@ -0,0 +1,248 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Mon, 23 Apr 2018 11:12:15 -0700
+Subject: drm/i915: Add gen9 BCS cmdparsing
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 0f2f39758341df70202ae1c42d5a1e4ee392b6d3 upstream.
+
+For gen9 we enable cmdparsing on the BCS ring, specifically
+to catch inadvertent accesses to sensitive registers
+
+Unlike gen7/hsw, we use the parser only to block certain
+registers. We can rely on h/w to block restricted commands,
+so the command tables only provide enough info to allow the
+parser to delineate each command, and identify commands that
+access registers.
+
+Note: This patch deliberately ignores checkpatch issues in
+favour of matching the style of the surrounding code. We'll
+correct the entire file in one go in a later patch.
+
+v3: rebase (Mika)
+v4: Add RING_TIMESTAMP registers to whitelist (Jon)
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 116 ++++++++++++++++++++++---
+ drivers/gpu/drm/i915/i915_reg.h | 4 +
+ 2 files changed, 110 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -443,6 +443,47 @@ static const struct drm_i915_cmd_descrip
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+ };
+
++/*
++ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
++ * need to re-enforce the register access checks. We therefore only need to
++ * teach the cmdparser how to find the end of each command, and identify
++ * register accesses. The table doesn't need to reject any commands, and so
++ * the only commands listed here are:
++ * 1) Those that touch registers
++ * 2) Those that do not have the default 8-bit length
++ *
++ * Note that the default MI length mask chosen for this table is 0xFF, not
++ * the 0x3F used on older devices. This is because the vast majority of MI
++ * cmds on Gen9 use a standard 8-bit Length field.
++ * All the Gen9 blitter instructions are standard 0xFF length mask, and
++ * none allow access to non-general registers, so in fact no BLT cmds are
++ * included in the table at all.
++ *
++ */
++static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
++ CMD( MI_NOOP, SMI, F, 1, S ),
++ CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
++ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
++ CMD( MI_FLUSH, SMI, F, 1, S ),
++ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
++ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
++ CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
++ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
++ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
++ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
++ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
++ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
++ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
++ CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
++ CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
++ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
++ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
++ CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
++ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
++ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
++ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
++};
++
+ static const struct drm_i915_cmd_descriptor noop_desc =
+ CMD(MI_NOOP, SMI, F, 1, S);
+
+@@ -489,6 +530,11 @@ static const struct drm_i915_cmd_table h
+ { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+ };
+
++static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
++ { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
++};
++
++
+ /*
+ * Register whitelists, sorted by increasing register offset.
+ */
+@@ -604,6 +650,29 @@ static const struct drm_i915_reg_descrip
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ };
+
++static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
++ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
++ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
++ REG32(BCS_SWCTRL),
++ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
++ REG64_IDX(BCS_GPR, 0),
++ REG64_IDX(BCS_GPR, 1),
++ REG64_IDX(BCS_GPR, 2),
++ REG64_IDX(BCS_GPR, 3),
++ REG64_IDX(BCS_GPR, 4),
++ REG64_IDX(BCS_GPR, 5),
++ REG64_IDX(BCS_GPR, 6),
++ REG64_IDX(BCS_GPR, 7),
++ REG64_IDX(BCS_GPR, 8),
++ REG64_IDX(BCS_GPR, 9),
++ REG64_IDX(BCS_GPR, 10),
++ REG64_IDX(BCS_GPR, 11),
++ REG64_IDX(BCS_GPR, 12),
++ REG64_IDX(BCS_GPR, 13),
++ REG64_IDX(BCS_GPR, 14),
++ REG64_IDX(BCS_GPR, 15),
++};
++
+ #undef REG64
+ #undef REG32
+
+@@ -629,6 +698,10 @@ static const struct drm_i915_reg_table h
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+ };
+
++static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
++ { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
++};
++
+ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+ {
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+@@ -684,6 +757,17 @@ static u32 gen7_blt_get_cmd_length_mask(
+ return 0;
+ }
+
++static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
++{
++ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
++
++ if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
++ return 0xFF;
++
++ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
++ return 0;
++}
++
+ static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
+ const struct drm_i915_cmd_table *cmd_tables,
+ int cmd_table_count)
+@@ -841,7 +925,8 @@ void intel_engine_init_cmd_parser(struct
+ int cmd_table_count;
+ int ret;
+
+- if (!IS_GEN(engine->i915, 7))
++ if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
++ engine->class == COPY_ENGINE_CLASS))
+ return;
+
+ switch (engine->class) {
+@@ -862,7 +947,6 @@ void intel_engine_init_cmd_parser(struct
+ engine->reg_tables = ivb_render_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
+ }
+-
+ engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+ break;
+ case VIDEO_DECODE_CLASS:
+@@ -871,7 +955,16 @@ void intel_engine_init_cmd_parser(struct
+ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ case COPY_ENGINE_CLASS:
+- if (IS_HASWELL(engine->i915)) {
++ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
++ if (IS_GEN(engine->i915, 9)) {
++ cmd_tables = gen9_blt_cmd_table;
++ cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
++ engine->get_cmd_length_mask =
++ gen9_blt_get_cmd_length_mask;
++
++ /* BCS Engine unsafe without parser */
++ engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
++ } else if (IS_HASWELL(engine->i915)) {
+ cmd_tables = hsw_blt_ring_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
+ } else {
+@@ -879,15 +972,17 @@ void intel_engine_init_cmd_parser(struct
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
+ }
+
+- if (IS_HASWELL(engine->i915)) {
++ if (IS_GEN(engine->i915, 9)) {
++ engine->reg_tables = gen9_blt_reg_tables;
++ engine->reg_table_count =
++ ARRAY_SIZE(gen9_blt_reg_tables);
++ } else if (IS_HASWELL(engine->i915)) {
+ engine->reg_tables = hsw_blt_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
+ } else {
+ engine->reg_tables = ivb_blt_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
+ }
+-
+- engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ cmd_tables = hsw_vebox_cmd_table;
+@@ -1260,9 +1355,9 @@ int intel_engine_cmd_parser(struct intel
+ }
+
+ /*
+- * If the batch buffer contains a chained batch, return an
+- * error that tells the caller to abort and dispatch the
+- * workload as a non-secure batch.
++ * We don't try to handle BATCH_BUFFER_START because it adds
++ * non-trivial complexity. Instead we abort the scan and return
++ * an error to indicate that the batch is unsafe.
+ */
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = -EACCES;
+@@ -1342,6 +1437,7 @@ int i915_cmd_parser_get_version(struct d
+ * the parser enabled.
+ * 9. Don't whitelist or handle oacontrol specially, as ownership
+ * for oacontrol state is moving to i915-perf.
++ * 10. Support for Gen9 BCS Parsing
+ */
+- return 9;
++ return 10;
+ }
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -577,6 +577,10 @@ static inline bool i915_mmio_reg_valid(i
+ */
+ #define BCS_SWCTRL _MMIO(0x22200)
+
++/* There are 16 GPR registers */
++#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
++#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
++
+ #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
+ #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
+ #define HS_INVOCATION_COUNT _MMIO(0x2300)
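On gen9 the BCS tables only have to let the parser step from one command to the next and spot register accesses. The sketch below shows the length computation that implies; it is not the driver function itself, and the 0xFF default MI length mask and the bias of 2 are taken as assumptions from the table comments and LENGTH_BIAS in this series.

/*
 * fixed_len > 0 models an F table entry (length known a priori);
 * otherwise the masked header bits encode (dword count - LENGTH_BIAS).
 */
static unsigned int cmd_length_dwords(unsigned int cmd_header,
				      unsigned int length_mask,
				      int fixed_len)
{
	if (fixed_len > 0)
		return (unsigned int)fixed_len;

	return (cmd_header & length_mask) + 2;	/* LENGTH_BIAS */
}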
diff --git a/debian/patches/bugfix/x86/i915/0008-drm-i915-cmdparser-Use-explicit-goto-for-error-paths.patch b/debian/patches/bugfix/x86/i915/0008-drm-i915-cmdparser-Use-explicit-goto-for-error-paths.patch
new file mode 100644
index 000000000000..9677432ba196
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0008-drm-i915-cmdparser-Use-explicit-goto-for-error-paths.patch
@@ -0,0 +1,94 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Thu, 27 Sep 2018 10:23:17 -0700
+Subject: drm/i915/cmdparser: Use explicit goto for error paths
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 0546a29cd884fb8184731c79ab008927ca8859d0 upstream.
+
+In the next patch we will be adding a second valid
+termination condition which will require a small
+amount of refactoring to share logic with the BB_END
+case.
+
+Refactor all error conditions to jump to a dedicated
+exit path, with 'break' reserved only for a successful
+parse.
+
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -1337,21 +1337,15 @@ int intel_engine_cmd_parser(struct intel
+ do {
+ u32 length;
+
+- if (*cmd == MI_BATCH_BUFFER_END) {
+- if (needs_clflush_after) {
+- void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+- drm_clflush_virt_range(ptr,
+- (void *)(cmd + 1) - ptr);
+- }
++ if (*cmd == MI_BATCH_BUFFER_END)
+ break;
+- }
+
+ desc = find_cmd(engine, *cmd, desc, &default_desc);
+ if (!desc) {
+ DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
+ *cmd);
+ ret = -EINVAL;
+- break;
++ goto err;
+ }
+
+ /*
+@@ -1361,7 +1355,7 @@ int intel_engine_cmd_parser(struct intel
+ */
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = -EACCES;
+- break;
++ goto err;
+ }
+
+ if (desc->flags & CMD_DESC_FIXED)
+@@ -1375,22 +1369,29 @@ int intel_engine_cmd_parser(struct intel
+ length,
+ batch_end - cmd);
+ ret = -EINVAL;
+- break;
++ goto err;
+ }
+
+ if (!check_cmd(engine, desc, cmd, length)) {
+ ret = -EACCES;
+- break;
++ goto err;
+ }
+
+ cmd += length;
+ if (cmd >= batch_end) {
+ DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ ret = -EINVAL;
+- break;
++ goto err;
+ }
+ } while (1);
+
++ if (needs_clflush_after) {
++ void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
++
++ drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
++ }
++
++err:
+ i915_gem_object_unpin_map(shadow_batch_obj);
+ return ret;
+ }
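The refactor above is easier to see outside the diff context. The fragment below is a compilable caricature of the new control flow, not the driver code: the stub checks stand in for find_cmd()/check_cmd() and their encodings are placeholders (only the MI_BATCH_BUFFER_END value mirrors the real opcode). Failures jump to one exit label, 'break' is reserved for a successful parse, and work that must only follow a clean parse sits after the loop.

#include <errno.h>

/* Stub helpers so the sketch is self-contained; not the driver's API. */
static int cmd_is_end(unsigned int c)       { return c == (0x0au << 23); /* MI_BATCH_BUFFER_END */ }
static int cmd_is_allowed(unsigned int c)   { return (c >> 29) == 0;     /* pretend: MI client only */ }
static unsigned int cmd_len(unsigned int c) { return 1 + (c & 0x3f);     /* pretend length field */ }

static int parse_sketch(const unsigned int *cmd, const unsigned int *end)
{
	int ret = 0;

	do {
		if (cmd_is_end(*cmd))
			break;			/* the only successful exit from the loop */

		if (!cmd_is_allowed(*cmd)) {
			ret = -EACCES;
			goto err;		/* every failure takes the same exit path */
		}

		cmd += cmd_len(*cmd);
		if (cmd >= end) {
			ret = -EINVAL;		/* ran off the end without a BB_END */
			goto err;
		}
	} while (1);

	/* work that must only happen after a clean parse goes here */
err:
	return ret;
}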
diff --git a/debian/patches/bugfix/x86/i915/0009-drm-i915-cmdparser-Add-support-for-backward-jumps.patch b/debian/patches/bugfix/x86/i915/0009-drm-i915-cmdparser-Add-support-for-backward-jumps.patch
new file mode 100644
index 000000000000..58b1957ee02b
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0009-drm-i915-cmdparser-Add-support-for-backward-jumps.patch
@@ -0,0 +1,398 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Thu, 20 Sep 2018 09:58:36 -0700
+Subject: drm/i915/cmdparser: Add support for backward jumps
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit f8c08d8faee5567803c8c533865296ca30286bbf upstream.
+
+To keep things manageable, the pre-gen9 cmdparser does not
+attempt to track any form of nested BB_START's. This did not
+prevent usermode from using nested starts, or even chained
+batches because the cmdparser is not strictly enforced pre gen9.
+
+Instead, the existence of a nested BB_START would cause the batch
+to be emitted in insecure mode, and any privileged capabilities
+would not be available.
+
+For Gen9, the cmdparser becomes mandatory (for BCS at least), and
+so not providing any form of nested BB_START support becomes
+overly restrictive. Any such batch will simply not run.
+
+We make heavy use of backward jumps in igt, and it is much easier
+to add support for this restricted subset of nested jumps, than to
+rewrite the whole of our test suite to avoid them.
+
+Add the required logic to support limited backward jumps, to
+instructions that have already been validated by the parser.
+
+Note that it's not sufficient to simply approve any BB_START
+that jumps backwards in the buffer because this would allow an
+attacker to embed a rogue instruction sequence within the
+operand words of a harmless instruction (say LRI) and jump to
+that.
+
+We introduce a bit array to track every instr offset successfully
+validated, and test the target of BB_START against this. If the
+target offset hits, it is re-written to the same offset in the
+shadow buffer and the BB_START cmd is allowed.
+
+Note: This patch deliberately ignores checkpatch issues in the
+cmdtables, in order to match the style of the surrounding code.
+We'll correct the entire file in one go in a later patch.
+
+v2: set dispatch secure late (Mika)
+v3: rebase (Mika)
+v4: Clear whitelist on each parse
+Minor review updates (Chris)
+v5: Correct backward jump batching
+v6: fix compilation error due to struct eb shuffle (Mika)
+
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_context.c | 5 +
+ .../gpu/drm/i915/gem/i915_gem_context_types.h | 5 +
+ .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 32 +++-
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 151 ++++++++++++++++--
+ drivers/gpu/drm/i915/i915_drv.h | 9 +-
+ 5 files changed, 176 insertions(+), 26 deletions(-)
+
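The jump-whitelist mechanism described above reduces to a bit array indexed by dword offset. The sketch below is a standalone illustration rather than the driver code (the driver keeps the array in the GEM context and uses its own bitmap helpers): offsets are recorded as each command is validated, and a BB_START target is accepted only if it lands exactly on a previously validated offset inside the batch.

#include <limits.h>

#define LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

/* Record that the command starting at dword index 'idx' passed the scan. */
static void whitelist_mark(unsigned long *whitelist, unsigned int idx)
{
	whitelist[idx / LONG_BITS] |= 1UL << (idx % LONG_BITS);
}

/* Accept a backward BB_START only onto an already-validated command. */
static int whitelist_jump_ok(const unsigned long *whitelist,
			     unsigned int n_cmds,
			     unsigned long long jump_offset,
			     unsigned long long batch_len)
{
	unsigned int idx;

	if (jump_offset >= batch_len)		/* outside the batch */
		return 0;

	idx = (unsigned int)(jump_offset / 4);	/* byte offset -> dword index */
	if (idx >= n_cmds)
		return 0;

	return !!(whitelist[idx / LONG_BITS] & (1UL << (idx % LONG_BITS)));
}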
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -315,6 +315,8 @@ static void i915_gem_context_free(struct
+ free_engines(rcu_access_pointer(ctx->engines));
+ mutex_destroy(&ctx->engines_mutex);
+
++ kfree(ctx->jump_whitelist);
++
+ if (ctx->timeline)
+ i915_timeline_put(ctx->timeline);
+
+@@ -465,6 +467,9 @@ __create_context(struct drm_i915_private
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+ ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
++ ctx->jump_whitelist = NULL;
++ ctx->jump_whitelist_cmds = 0;
++
+ return ctx;
+
+ err_free:
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+@@ -197,6 +197,11 @@ struct i915_gem_context {
+ * per vm, which may be one per context or shared with the global GTT)
+ */
+ struct radix_tree_root handles_vma;
++
++ /** jump_whitelist: Bit array for tracking cmds during cmdparsing */
++ unsigned long *jump_whitelist;
++ /** jump_whitelist_cmds: No of cmd slots available */
++ u32 jump_whitelist_cmds;
+ };
+
+ #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2026,7 +2026,6 @@ shadow_batch_pin(struct i915_execbuffer
+ if (CMDPARSER_USES_GGTT(dev_priv)) {
+ flags = PIN_GLOBAL;
+ vm = &dev_priv->ggtt.vm;
+- eb->batch_flags |= I915_DISPATCH_SECURE;
+ } else if (vma->vm->has_read_only) {
+ flags = PIN_USER;
+ vm = vma->vm;
+@@ -2043,6 +2042,8 @@ static struct i915_vma *eb_parse(struct
+ {
+ struct drm_i915_gem_object *shadow_batch_obj;
+ struct i915_vma *vma;
++ u64 batch_start;
++ u64 shadow_batch_start;
+ int err;
+
+ shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
+@@ -2050,12 +2051,27 @@ static struct i915_vma *eb_parse(struct
+ if (IS_ERR(shadow_batch_obj))
+ return ERR_CAST(shadow_batch_obj);
+
+- err = intel_engine_cmd_parser(eb->engine,
++ vma = shadow_batch_pin(eb, shadow_batch_obj);
++ if (IS_ERR(vma))
++ goto out;
++
++ batch_start = gen8_canonical_addr(eb->batch->node.start) +
++ eb->batch_start_offset;
++
++ shadow_batch_start = gen8_canonical_addr(vma->node.start);
++
++ err = intel_engine_cmd_parser(eb->gem_context,
++ eb->engine,
+ eb->batch->obj,
+- shadow_batch_obj,
++ batch_start,
+ eb->batch_start_offset,
+- eb->batch_len);
++ eb->batch_len,
++ shadow_batch_obj,
++ shadow_batch_start);
++
+ if (err) {
++ i915_vma_unpin(vma);
++
+ /*
+ * Unsafe GGTT-backed buffers can still be submitted safely
+ * as non-secure.
+@@ -2070,10 +2086,6 @@ static struct i915_vma *eb_parse(struct
+ goto out;
+ }
+
+- vma = shadow_batch_pin(eb, shadow_batch_obj);
+- if (IS_ERR(vma))
+- goto out;
+-
+ eb->vma[eb->buffer_count] = i915_vma_get(vma);
+ eb->flags[eb->buffer_count] =
+ __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
+@@ -2081,6 +2093,10 @@ static struct i915_vma *eb_parse(struct
+ eb->buffer_count++;
+ eb->batch_start_offset = 0;
+ eb->batch = vma;
++
++ if (CMDPARSER_USES_GGTT(eb->i915))
++ eb->batch_flags |= I915_DISPATCH_SECURE;
++
+ /* eb->batch_len unchanged */
+ out:
+ i915_gem_object_unpin_pages(shadow_batch_obj);
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -482,6 +482,19 @@ static const struct drm_i915_cmd_descrip
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
++
++ /*
++ * We allow BB_START but apply further checks. We just sanitize the
++ * basic fields here.
++ */
++#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
++#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
++ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
++ .bits = {{
++ .offset = 0,
++ .mask = MI_BB_START_OPERAND_MASK,
++ .expected = MI_BB_START_OPERAND_EXPECT,
++ }}, ),
+ };
+
+ static const struct drm_i915_cmd_descriptor noop_desc =
+@@ -1292,15 +1305,113 @@ static bool check_cmd(const struct intel
+ return true;
+ }
+
++static int check_bbstart(const struct i915_gem_context *ctx,
++ u32 *cmd, u32 offset, u32 length,
++ u32 batch_len,
++ u64 batch_start,
++ u64 shadow_batch_start)
++{
++ u64 jump_offset, jump_target;
++ u32 target_cmd_offset, target_cmd_index;
++
++ /* For igt compatibility on older platforms */
++ if (CMDPARSER_USES_GGTT(ctx->i915)) {
++ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
++ return -EACCES;
++ }
++
++ if (length != 3) {
++ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
++ length);
++ return -EINVAL;
++ }
++
++ jump_target = *(u64*)(cmd+1);
++ jump_offset = jump_target - batch_start;
++
++ /*
++ * Any underflow of jump_target is guaranteed to be outside the range
++ * of a u32, so >= test catches both too large and too small
++ */
++ if (jump_offset >= batch_len) {
++ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
++ jump_target);
++ return -EINVAL;
++ }
++
++ /*
++ * This cannot overflow a u32 because we already checked jump_offset
++ * is within the BB, and the batch_len is a u32
++ */
++ target_cmd_offset = lower_32_bits(jump_offset);
++ target_cmd_index = target_cmd_offset / sizeof(u32);
++
++ *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
++
++ if (target_cmd_index == offset)
++ return 0;
++
++ if (ctx->jump_whitelist_cmds <= target_cmd_index) {
++ DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
++ return -EINVAL;
++ } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
++ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
++ jump_target);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
++{
++ const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
++ const u32 exact_size = BITS_TO_LONGS(batch_cmds);
++ u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
++ unsigned long *next_whitelist;
++
++ if (CMDPARSER_USES_GGTT(ctx->i915))
++ return;
++
++ if (batch_cmds <= ctx->jump_whitelist_cmds) {
++ memset(ctx->jump_whitelist, 0, exact_size * sizeof(u32));
++ return;
++ }
++
++again:
++ next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
++ if (next_whitelist) {
++ kfree(ctx->jump_whitelist);
++ ctx->jump_whitelist = next_whitelist;
++ ctx->jump_whitelist_cmds =
++ next_size * BITS_PER_BYTE * sizeof(long);
++ return;
++ }
++
++ if (next_size > exact_size) {
++ next_size = exact_size;
++ goto again;
++ }
++
++ DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
++ memset(ctx->jump_whitelist, 0,
++ BITS_TO_LONGS(ctx->jump_whitelist_cmds) * sizeof(u32));
++
++ return;
++}
++
+ #define LENGTH_BIAS 2
+
+ /**
+ * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
++ * @ctx: the context in which the batch is to execute
+ * @engine: the engine on which the batch is to execute
+ * @batch_obj: the batch buffer in question
+- * @shadow_batch_obj: copy of the batch buffer in question
++ * @batch_start: Canonical base address of batch
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
++ * @shadow_batch_obj: copy of the batch buffer in question
++ * @shadow_batch_start: Canonical base address of shadow_batch_obj
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+@@ -1308,13 +1419,17 @@ static bool check_cmd(const struct intel
+ * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+ * if the batch appears legal but should use hardware parsing
+ */
+-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
++
++int intel_engine_cmd_parser(struct i915_gem_context *ctx,
++ struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *batch_obj,
+- struct drm_i915_gem_object *shadow_batch_obj,
++ u64 batch_start,
+ u32 batch_start_offset,
+- u32 batch_len)
++ u32 batch_len,
++ struct drm_i915_gem_object *shadow_batch_obj,
++ u64 shadow_batch_start)
+ {
+- u32 *cmd, *batch_end;
++ u32 *cmd, *batch_end, offset = 0;
+ struct drm_i915_cmd_descriptor default_desc = noop_desc;
+ const struct drm_i915_cmd_descriptor *desc = &default_desc;
+ bool needs_clflush_after = false;
+@@ -1328,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel
+ return PTR_ERR(cmd);
+ }
+
++ init_whitelist(ctx, batch_len);
++
+ /*
+ * We use the batch length as size because the shadow object is as
+ * large or larger and copy_batch() will write MI_NOPs to the extra
+@@ -1348,16 +1465,6 @@ int intel_engine_cmd_parser(struct intel
+ goto err;
+ }
+
+- /*
+- * We don't try to handle BATCH_BUFFER_START because it adds
+- * non-trivial complexity. Instead we abort the scan and return
+- * and error to indicate that the batch is unsafe.
+- */
+- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+- ret = -EACCES;
+- goto err;
+- }
+-
+ if (desc->flags & CMD_DESC_FIXED)
+ length = desc->length.fixed;
+ else
+@@ -1377,7 +1484,21 @@ int intel_engine_cmd_parser(struct intel
+ goto err;
+ }
+
++ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
++ ret = check_bbstart(ctx, cmd, offset, length,
++ batch_len, batch_start,
++ shadow_batch_start);
++
++ if (ret)
++ goto err;
++ break;
++ }
++
++ if (ctx->jump_whitelist_cmds > offset)
++ set_bit(offset, ctx->jump_whitelist);
++
+ cmd += length;
++ offset += length;
+ if (cmd >= batch_end) {
+ DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ ret = -EINVAL;
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2727,11 +2727,14 @@ const char *i915_cache_level_str(struct
+ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
+ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
++int intel_engine_cmd_parser(struct i915_gem_context *cxt,
++ struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *batch_obj,
+- struct drm_i915_gem_object *shadow_batch_obj,
++ u64 user_batch_start,
+ u32 batch_start_offset,
+- u32 batch_len);
++ u32 batch_len,
++ struct drm_i915_gem_object *shadow_batch_obj,
++ u64 shadow_batch_start);
+
+ /* i915_perf.c */
+ extern void i915_perf_init(struct drm_i915_private *dev_priv);
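The core of check_bbstart() in the patch above is a single unsigned comparison: the MI_BATCH_BUFFER_START target is reduced to an offset from the user batch's base address, an underflow (a jump to before the batch) wraps to a value far larger than any 32-bit batch length, so one >= test rejects jumps in either direction, and the surviving offset is looked up in the per-context bitmap of command boundaries already scanned. The standalone sketch below reproduces only that arithmetic; the function name, the flat bitmap argument and the plain bit test are illustrative stand-ins rather than the driver's ctx->jump_whitelist handling, and the patch's extra checks (GGTT rejection, the length == 3 requirement, rewriting the operand to point into the shadow batch) are omitted.

/*
 * Standalone sketch of the jump validation in check_bbstart().  Names and
 * the bitmap layout are simplified stand-ins; only the offset arithmetic
 * and the whitelist lookup mirror the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Returns 0 if the jump lands on a previously scanned command, else -1. */
static int validate_bb_start(uint64_t batch_start, uint32_t batch_len,
                             uint64_t jump_target,
                             const unsigned long *whitelist, uint32_t n_cmds)
{
        /*
         * An underflow wraps far above any 32-bit batch length, so this
         * one test catches both "before the batch" and "past the end".
         */
        uint64_t jump_offset = jump_target - batch_start;

        if (jump_offset >= batch_len)
                return -1;

        uint32_t index = (uint32_t)jump_offset / sizeof(uint32_t);

        if (index >= n_cmds)
                return -1;

        return (whitelist[index / BITS_PER_LONG] >> (index % BITS_PER_LONG)) & 1
                ? 0 : -1;
}

int main(void)
{
        unsigned long whitelist[1] = { 0x1 };   /* only command 0 was scanned */
        uint64_t start = 0x100000;

        printf("%d\n", validate_bb_start(start, 64, start,     whitelist, 16)); /* 0  */
        printf("%d\n", validate_bb_start(start, 64, start + 8, whitelist, 16)); /* -1 */
        printf("%d\n", validate_bb_start(start, 64, start - 4, whitelist, 16)); /* -1 */
        return 0;
}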
diff --git a/debian/patches/bugfix/x86/i915/0010-drm-i915-cmdparser-Ignore-Length-operands-during-com.patch b/debian/patches/bugfix/x86/i915/0010-drm-i915-cmdparser-Ignore-Length-operands-during-com.patch
new file mode 100644
index 000000000000..1f23d5b1830b
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0010-drm-i915-cmdparser-Ignore-Length-operands-during-com.patch
@@ -0,0 +1,37 @@
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Thu, 20 Sep 2018 09:45:10 -0700
+Subject: drm/i915/cmdparser: Ignore Length operands during command matching
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0155
+
+commit 926abff21a8f29ef159a3ac893b05c6e50e043c3 upstream.
+
+Some of the gen instruction macros (e.g. MI_DISPLAY_FLIP) have the
+length directly encoded in them. Since these are used directly in
+the tables, the Length becomes part of the comparison used for
+matching during parsing. Thus, if the cmd being parsed has a
+different length to that in the table, it is not matched and the
+cmd is accepted via the default variable length path.
+
+Fix this by masking out everything except the Opcode in the cmd tables.
+
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -188,7 +188,7 @@ struct drm_i915_cmd_table {
+ #define CMD(op, opm, f, lm, fl, ...) \
+ { \
+ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
+- .cmd = { (op), ~0u << (opm) }, \
++ .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
+ .length = { (lm) }, \
+ __VA_ARGS__ \
+ }
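The one-line change above matters because of how the parser matches a command against its table: the submitted header is masked and compared with the stored value, so a macro such as MI_DISPLAY_FLIP that carries a DWord length in its low bits only matches submissions with that exact length, and anything else falls through to the default variable-length handling. The small program below, a sketch with an invented opcode position, mask and command values, shows the before/after effect of masking the table entry as the patch does.

/*
 * Illustration of the CMD() change above.  The opcode width and the
 * command values are made up for the example; only the masking logic
 * mirrors the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define OPCODE_SHIFT 23                         /* bits below this hold operands/length */
#define OPCODE_MASK  (~0u << OPCODE_SHIFT)

int main(void)
{
        uint32_t table_entry = (0x09u << OPCODE_SHIFT) | 2; /* macro with a length baked in */
        uint32_t submitted   = (0x09u << OPCODE_SHIFT) | 5; /* same opcode, different length */

        /*
         * Old behaviour: the stored value still contains the length, so the
         * masked header never equals it and the command goes unmatched.
         */
        printf("old match: %d\n", (submitted & OPCODE_MASK) == table_entry);

        /*
         * New behaviour: the table value is masked when stored, so matching
         * depends on the opcode alone.
         */
        printf("new match: %d\n",
               (submitted & OPCODE_MASK) == (table_entry & OPCODE_MASK));
        return 0;
}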
diff --git a/debian/patches/bugfix/x86/i915/0011-drm-i915-Lower-RM-timeout-to-avoid-DSI-hard-hangs.patch b/debian/patches/bugfix/x86/i915/0011-drm-i915-Lower-RM-timeout-to-avoid-DSI-hard-hangs.patch
new file mode 100644
index 000000000000..5a6ff611b932
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0011-drm-i915-Lower-RM-timeout-to-avoid-DSI-hard-hangs.patch
@@ -0,0 +1,72 @@
+From: Uma Shankar <uma.shankar@intel.com>
+Date: Tue, 7 Aug 2018 21:15:35 +0530
+Subject: drm/i915: Lower RM timeout to avoid DSI hard hangs
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0154
+
+commit 1d85a299c4db57c55e0229615132c964d17aa765 upstream.
+
+In BXT/APL, device 2 MMIO reads from the MIPI controller require its PLL
+to be turned on. When the MIPI PLL is turned off (the MIPI display is not
+active or connected) and someone (the host or a GT engine) tries to read
+MIPI registers, the read causes a hard hang. This is a hardware
+restriction or limitation.
+
+The driver by itself doesn't read MIPI registers when the MIPI display
+is off, but any userspace application can submit an unprivileged batch
+buffer for execution, and that batch buffer can contain MMIO reads.
+These reads are allowed even for unprivileged applications. If such a
+register read targets the MIPI DSI controller while the MIPI display is
+not active at that time, the MMIO read causes a system hard hang, and
+the only way to recover is a hard reboot. A genuine process/application
+won't submit a batch buffer like this and doesn't cause any issue, but
+on a compromised system a malicious userspace process can generate such
+a batch buffer and trigger a system hard hang (a denial-of-service
+attack).
+
+The fix is to lower the internal MMIO timeout to an optimum value of
+950us, as recommended by the hardware team. If the timeout is beyond
+1ms (and the timeout will be hit, whatever value we choose, whenever an
+MMIO read of a DSI-specific register is performed without the PLL on),
+it causes the system to hang. But if the timeout value is lower, it
+stays below that threshold (even when the timeout is hit) and the
+system will not get into a hung state. This avoids a system hang
+without losing any programming or GT interrupts, taking the worst case
+of the lowest CDCLK frequency and an early DC5 abort into account.
+
+Signed-off-by: Uma Shankar <uma.shankar@intel.com>
+Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
+---
+ drivers/gpu/drm/i915/i915_reg.h | 4 ++++
+ drivers/gpu/drm/i915/intel_pm.c | 8 ++++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -7233,6 +7233,10 @@ enum {
+ #define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
+ #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+
++/* Display Internal Timeout Register */
++#define RM_TIMEOUT _MMIO(0x42060)
++#define MMIO_TIMEOUT_US(us) ((us) << 0)
++
+ /* interrupts */
+ #define DE_MASTER_IRQ_CONTROL (1 << 31)
+ #define DE_SPRITEB_FLIP_DONE (1 << 29)
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -125,6 +125,14 @@ static void bxt_init_clock_gating(struct
+ */
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ PWM1_GATING_DIS | PWM2_GATING_DIS);
++
++ /*
++ * Lower the display internal timeout.
++ * This is needed to avoid any hard hangs when DSI port PLL
++ * is off and a MMIO access is attempted by any privilege
++ * application, using batch buffers or any other means.
++ */
++ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+ }
+
+ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
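The numbers in the commit message carry the whole argument: a DSI register read issued with the PLL off never completes, so it runs to whatever value RM_TIMEOUT is programmed with, and a timeout at or beyond roughly 1ms wedges the display engine instead of merely expiring. Any programmed value under that threshold therefore turns the hard hang into a recoverable timeout. The toy model below assumes only the ~1ms threshold and the 950us setting described above; the comparison value and the helper are illustrative, not driver code.

/*
 * Toy model of the timing argument: a stalled DSI read runs for the full
 * programmed timeout, and at or beyond ~1 ms the platform hard-hangs
 * rather than reporting a timeout.  The 4000 us value is an arbitrary
 * stand-in for an over-long default.
 */
#include <stdbool.h>
#include <stdio.h>

#define HANG_THRESHOLD_US 1000

static bool stalled_read_hangs(unsigned int rm_timeout_us)
{
        return rm_timeout_us >= HANG_THRESHOLD_US;
}

int main(void)
{
        printf("timeout 4000 us: %s\n",
               stalled_read_hangs(4000) ? "hard hang" : "times out, recovers");
        printf("timeout  950 us: %s\n",
               stalled_read_hangs(950)  ? "hard hang" : "times out, recovers");
        return 0;
}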
diff --git a/debian/patches/bugfix/x86/i915/0012-drm-i915-gen8-Add-RC6-CTX-corruption-WA.patch b/debian/patches/bugfix/x86/i915/0012-drm-i915-gen8-Add-RC6-CTX-corruption-WA.patch
new file mode 100644
index 000000000000..23a95c984f3b
--- /dev/null
+++ b/debian/patches/bugfix/x86/i915/0012-drm-i915-gen8-Add-RC6-CTX-corruption-WA.patch
@@ -0,0 +1,284 @@
+From: Imre Deak <imre.deak@intel.com>
+Date: Mon, 9 Jul 2018 18:24:27 +0300
+Subject: drm/i915/gen8+: Add RC6 CTX corruption WA
+Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-0154
+
+commit 7e34f4e4aad3fd34c02b294a3cf2321adf5b4438 upstream.
+
+In some circumstances the RC6 context can get corrupted. We can detect
+this and take the required action, that is, disable RC6 and runtime PM.
+The HW recovers from the corrupted state after a system suspend/resume
+cycle, so detect the recovery and re-enable RC6 and runtime PM.
+
+v2: rebase (Mika)
+v3:
+- Move intel_suspend_gt_powersave() to the end of the GEM suspend
+ sequence.
+- Add commit message.
+v4:
+- Rebased on intel_uncore_forcewake_put(i915->uncore, ...) API
+ change.
+v5: rebased on gem/gt split (Mika)
+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+---
+ drivers/gpu/drm/i915/gt/intel_gt_pm.c | 8 ++
+ drivers/gpu/drm/i915/i915_drv.c | 4 +
+ drivers/gpu/drm/i915/i915_drv.h | 8 +-
+ drivers/gpu/drm/i915/i915_reg.h | 2 +
+ drivers/gpu/drm/i915/intel_pm.c | 107 +++++++++++++++++++++++++-
+ drivers/gpu/drm/i915/intel_pm.h | 3 +
+ 6 files changed, 128 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+@@ -36,6 +36,9 @@ static int intel_gt_unpark(struct intel_
+ i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!i915->gt.awake);
+
++ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
++ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
++
+ intel_enable_gt_powersave(i915);
+
+ i915_update_gfx_val(i915);
+@@ -70,6 +73,11 @@ static int intel_gt_park(struct intel_wa
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_idle(i915);
+
++ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
++ intel_rc6_ctx_wa_check(i915);
++ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
++ }
++
+ GEM_BUG_ON(!wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -2156,6 +2156,8 @@ static int i915_drm_suspend_late(struct
+
+ i915_gem_suspend_late(dev_priv);
+
++ intel_rc6_ctx_wa_suspend(dev_priv);
++
+ intel_uncore_suspend(&dev_priv->uncore);
+
+ intel_power_domains_suspend(dev_priv,
+@@ -2372,6 +2374,8 @@ static int i915_drm_resume_early(struct
+
+ intel_power_domains_resume(dev_priv);
+
++ intel_rc6_ctx_wa_resume(dev_priv);
++
+ intel_gt_sanitize(dev_priv, true);
+
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -696,6 +696,8 @@ struct intel_rps {
+
+ struct intel_rc6 {
+ bool enabled;
++ bool ctx_corrupted;
++ intel_wakeref_t ctx_corrupted_wakeref;
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
+ };
+@@ -2288,10 +2290,12 @@ IS_SUBPLATFORM(const struct drm_i915_pri
+ /* Early gen2 have a totally busted CS tlb and require pinned batches. */
+ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
+
++#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
++ (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
++
+ /* WaRsDisableCoarsePowerGating:skl,cnl */
+ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+- (IS_CANNONLAKE(dev_priv) || \
+- IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
++ (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
+
+ #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
+ #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -493,6 +493,8 @@ static inline bool i915_mmio_reg_valid(i
+ #define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+ #define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+
++#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
++
+ #define GAC_ECO_BITS _MMIO(0x14090)
+ #define ECOBITS_SNB_BIT (1 << 13)
+ #define ECOBITS_PPGTT_CACHE64B (3 << 8)
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -8564,6 +8564,95 @@ static void intel_init_emon(struct drm_i
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+ }
+
++static bool intel_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
++{
++ return !I915_READ(GEN8_RC6_CTX_INFO);
++}
++
++static void intel_rc6_ctx_wa_init(struct drm_i915_private *i915)
++{
++ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
++ return;
++
++ if (intel_rc6_ctx_corrupted(i915)) {
++ DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
++ i915->gt_pm.rc6.ctx_corrupted = true;
++ i915->gt_pm.rc6.ctx_corrupted_wakeref =
++ intel_runtime_pm_get(&i915->runtime_pm);
++ }
++}
++
++static void intel_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
++{
++ if (i915->gt_pm.rc6.ctx_corrupted) {
++ intel_runtime_pm_put(&i915->runtime_pm,
++ i915->gt_pm.rc6.ctx_corrupted_wakeref);
++ i915->gt_pm.rc6.ctx_corrupted = false;
++ }
++}
++
++/**
++ * intel_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
++ * @i915: i915 device
++ *
++ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
++ */
++void intel_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
++{
++ if (i915->gt_pm.rc6.ctx_corrupted)
++ intel_runtime_pm_put(&i915->runtime_pm,
++ i915->gt_pm.rc6.ctx_corrupted_wakeref);
++}
++
++/**
++ * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
++ * @i915: i915 device
++ *
++ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
++ */
++void intel_rc6_ctx_wa_resume(struct drm_i915_private *i915)
++{
++ if (!i915->gt_pm.rc6.ctx_corrupted)
++ return;
++
++ if (intel_rc6_ctx_corrupted(i915)) {
++ i915->gt_pm.rc6.ctx_corrupted_wakeref =
++ intel_runtime_pm_get(&i915->runtime_pm);
++ return;
++ }
++
++ DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
++ i915->gt_pm.rc6.ctx_corrupted = false;
++}
++
++static void intel_disable_rc6(struct drm_i915_private *dev_priv);
++
++/**
++ * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption
++ * @i915: i915 device
++ *
++ * Check if an RC6 CTX corruption has happened since the last check and if so
++ * disable RC6 and runtime power management.
++*/
++void intel_rc6_ctx_wa_check(struct drm_i915_private *i915)
++{
++ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
++ return;
++
++ if (i915->gt_pm.rc6.ctx_corrupted)
++ return;
++
++ if (!intel_rc6_ctx_corrupted(i915))
++ return;
++
++ DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
++
++ intel_disable_rc6(i915);
++ i915->gt_pm.rc6.ctx_corrupted = true;
++ i915->gt_pm.rc6.ctx_corrupted_wakeref =
++ intel_runtime_pm_get_noresume(&i915->runtime_pm);
++}
++
+ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
+ {
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+@@ -8577,6 +8666,8 @@ void intel_init_gt_powersave(struct drm_
+ pm_runtime_get(&dev_priv->drm.pdev->dev);
+ }
+
++ intel_rc6_ctx_wa_init(dev_priv);
++
+ /* Initialize RPS limits (for userspace) */
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_init_gt_powersave(dev_priv);
+@@ -8615,6 +8706,8 @@ void intel_cleanup_gt_powersave(struct d
+ if (IS_VALLEYVIEW(dev_priv))
+ valleyview_cleanup_gt_powersave(dev_priv);
+
++ intel_rc6_ctx_wa_cleanup(dev_priv);
++
+ if (!HAS_RC6(dev_priv))
+ pm_runtime_put(&dev_priv->drm.pdev->dev);
+ }
+@@ -8643,7 +8736,7 @@ static inline void intel_disable_llc_pst
+ i915->gt_pm.llc_pstate.enabled = false;
+ }
+
+-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
++static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
+ {
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
+
+@@ -8662,6 +8755,13 @@ static void intel_disable_rc6(struct drm
+ dev_priv->gt_pm.rc6.enabled = false;
+ }
+
++static void intel_disable_rc6(struct drm_i915_private *dev_priv)
++{
++ mutex_lock(&dev_priv->gt_pm.rps.lock);
++ __intel_disable_rc6(dev_priv);
++ mutex_unlock(&dev_priv->gt_pm.rps.lock);
++}
++
+ static void intel_disable_rps(struct drm_i915_private *dev_priv)
+ {
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
+@@ -8687,7 +8787,7 @@ void intel_disable_gt_powersave(struct d
+ {
+ mutex_lock(&dev_priv->gt_pm.rps.lock);
+
+- intel_disable_rc6(dev_priv);
++ __intel_disable_rc6(dev_priv);
+ intel_disable_rps(dev_priv);
+ if (HAS_LLC(dev_priv))
+ intel_disable_llc_pstate(dev_priv);
+@@ -8714,6 +8814,9 @@ static void intel_enable_rc6(struct drm_
+ if (dev_priv->gt_pm.rc6.enabled)
+ return;
+
++ if (dev_priv->gt_pm.rc6.ctx_corrupted)
++ return;
++
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_enable_rc6(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+--- a/drivers/gpu/drm/i915/intel_pm.h
++++ b/drivers/gpu/drm/i915/intel_pm.h
+@@ -36,6 +36,9 @@ void intel_cleanup_gt_powersave(struct d
+ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
+ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
++void intel_rc6_ctx_wa_check(struct drm_i915_private *i915);
++void intel_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
++void intel_rc6_ctx_wa_resume(struct drm_i915_private *i915);
+ void gen6_rps_busy(struct drm_i915_private *dev_priv);
+ void gen6_rps_idle(struct drm_i915_private *dev_priv);
+ void gen6_rps_boost(struct i915_request *rq);
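Stripped of the driver plumbing, the workaround above is a small state machine around one flag: when the GT parks, a zero read of GEN8_RC6_CTX_INFO latches rc6.ctx_corrupted, RC6 is disabled and a runtime-PM reference is held; a later resume that finds the register sane again clears the flag and drops the reference. The sketch below models that flow under those assumptions; the struct, the register stub and the wakeref counter are simplified stand-ins for the i915 internals, and the suspend-side wakeref handling is folded into the resume step.

/*
 * Simplified model of the RC6 context-corruption workaround.  All names
 * here are stand-ins; only the detect/latch/recover flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct rc6_wa {
        bool ctx_corrupted;
        int wakerefs;                   /* models the held runtime-PM reference */
        unsigned int rc6_ctx_info;      /* models a GEN8_RC6_CTX_INFO read */
};

static bool ctx_corrupted(const struct rc6_wa *wa)
{
        return wa->rc6_ctx_info == 0;   /* the register reading 0 means corruption */
}

/* GT park path: latch new corruption and pin runtime PM (RC6 disable elided). */
static void wa_check(struct rc6_wa *wa)
{
        if (wa->ctx_corrupted || !ctx_corrupted(wa))
                return;
        printf("RC6 context corruption, disabling runtime power management\n");
        wa->ctx_corrupted = true;
        wa->wakerefs++;
}

/* Resume path: if the register is sane again, release the pin. */
static void wa_resume(struct rc6_wa *wa)
{
        if (!wa->ctx_corrupted || ctx_corrupted(wa))
                return;
        printf("RC6 context restored, re-enabling runtime power management\n");
        wa->ctx_corrupted = false;
        wa->wakerefs--;
}

int main(void)
{
        struct rc6_wa wa = { .rc6_ctx_info = 1 };

        wa_check(&wa);                  /* healthy: nothing happens */
        wa.rc6_ctx_info = 0;            /* corruption occurs */
        wa_check(&wa);                  /* latched, runtime PM held */
        wa.rc6_ctx_info = 1;            /* HW recovered across suspend/resume */
        wa_resume(&wa);                 /* pin released */
        printf("corrupted=%d wakerefs=%d\n", wa.ctx_corrupted, wa.wakerefs);
        return 0;
}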
diff --git a/debian/patches/series b/debian/patches/series
index 7f486571e2e7..320b83bbecb8 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -159,6 +159,18 @@ bugfix/x86/itlb_multihit/0004-kvm-mmu-ITLB_MULTIHIT-mitigation.patch
bugfix/x86/itlb_multihit/0005-kvm-Add-helper-function-for-creating-VM-worker-threa.patch
bugfix/x86/itlb_multihit/0006-kvm-x86-mmu-Recovery-of-shattered-NX-large-pages.patch
bugfix/x86/itlb_multihit/0007-Documentation-Add-ITLB_MULTIHIT-documentation.patch
+bugfix/x86/i915/0001-drm-i915-Rename-gen7-cmdparser-tables.patch
+bugfix/x86/i915/0002-drm-i915-Disable-Secure-Batches-for-gen6.patch
+bugfix/x86/i915/0003-drm-i915-Remove-Master-tables-from-cmdparser.patch
+bugfix/x86/i915/0004-drm-i915-Add-support-for-mandatory-cmdparsing.patch
+bugfix/x86/i915/0005-drm-i915-Support-ro-ppgtt-mapped-cmdparser-shadow-bu.patch
+bugfix/x86/i915/0006-drm-i915-Allow-parsing-of-unsized-batches.patch
+bugfix/x86/i915/0007-drm-i915-Add-gen9-BCS-cmdparsing.patch
+bugfix/x86/i915/0008-drm-i915-cmdparser-Use-explicit-goto-for-error-paths.patch
+bugfix/x86/i915/0009-drm-i915-cmdparser-Add-support-for-backward-jumps.patch
+bugfix/x86/i915/0010-drm-i915-cmdparser-Ignore-Length-operands-during-com.patch
+bugfix/x86/i915/0011-drm-i915-Lower-RM-timeout-to-avoid-DSI-hard-hangs.patch
+bugfix/x86/i915/0012-drm-i915-gen8-Add-RC6-CTX-corruption-WA.patch
# Fix exported symbol versions
bugfix/all/module-disable-matching-missing-version-crc.patch