1070 files changed, 19949 insertions, 16936 deletions
diff --git a/.gitignore b/.gitignore index 8f5422cba6e2..2030c7a4d2f8 100644 --- a/.gitignore +++ b/.gitignore @@ -142,3 +142,6 @@ x509.genkey # Kdevelop4 *.kdev4 + +# Clang's compilation database file +/compile_commands.json @@ -98,6 +98,7 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com> Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com> <javier@osg.samsung.com> <javier.martinez@collabora.co.uk> Jean Tourrilhes <jt@hpl.hp.com> +<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com> Jeff Garzik <jgarzik@pretzel.yyz.us> Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com> Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net> @@ -116,6 +117,7 @@ John Stultz <johnstul@us.ibm.com> Juha Yrjola <at solidboot.com> Juha Yrjola <juha.yrjola@nokia.com> Juha Yrjola <juha.yrjola@solidboot.com> +Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com> Kay Sievers <kay.sievers@vrfy.org> Kenneth W Chen <kenneth.w.chen@intel.com> Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> @@ -132,6 +134,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org> Li Yang <leoyang.li@nxp.com> <leoli@freescale.com> Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> +Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com> Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> Mark Brown <broonie@sirena.org.uk> Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com> diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst index 83db42092935..e5d450df06b4 100644 --- a/Documentation/PCI/pci-error-recovery.rst +++ b/Documentation/PCI/pci-error-recovery.rst @@ -403,7 +403,7 @@ That is, the recovery API only requires that: .. note:: Implementation details for the powerpc platform are discussed in - the file Documentation/powerpc/eeh-pci-error-recovery.txt + the file Documentation/powerpc/eeh-pci-error-recovery.rst As of this writing, there is a growing list of device drivers with patches implementing error recovery. Not all of these patches are in @@ -422,3 +422,6 @@ That is, the recovery API only requires that: - drivers/net/cxgb3 - drivers/net/s2io.c - drivers/net/qlge + +The End +------- diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt index 8151f0195f76..23f115dc87cf 100644 --- a/Documentation/RCU/rculist_nulls.txt +++ b/Documentation/RCU/rculist_nulls.txt @@ -1,7 +1,7 @@ Using hlist_nulls to protect read-mostly linked lists and objects using SLAB_TYPESAFE_BY_RCU allocations. 
-Please read the basics in Documentation/RCU/listRCU.txt +Please read the basics in Documentation/RCU/listRCU.rst Using special makers (called 'nulls') is a convenient way to solve following problem : diff --git a/Documentation/admin-guide/conf.py b/Documentation/admin-guide/conf.py deleted file mode 100644 index 86f738953799..000000000000 --- a/Documentation/admin-guide/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = 'Linux Kernel User Documentation' - -tags.add("subproject") - -latex_documents = [ - ('index', 'linux-user.tex', 'Linux Kernel User Documentation', - 'The kernel development community', 'manual'), -] diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 46b826fcb5ad..7ccd158b3894 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2545,7 +2545,7 @@ mem_encrypt=on: Activate SME mem_encrypt=off: Do not activate SME - Refer to Documentation/virtual/kvm/amd-memory-encryption.rst + Refer to Documentation/virt/kvm/amd-memory-encryption.rst for details on when memory encryption can be activated. mem_sleep_default= [SUSPEND] Default system suspend mode: diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 7ab93a8404b9..bd5714547cee 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -53,7 +53,7 @@ disabled, there is ``khugepaged`` daemon that scans memory and collapses sequences of basic pages into huge pages. The THP behaviour is controlled via :ref:`sysfs <thp_sysfs>` -interface and using madivse(2) and prctl(2) system calls. +interface and using madvise(2) and prctl(2) system calls. Transparent Hugepage Support maximizes the usefulness of free memory if compared to the reservation approach of hugetlbfs by allowing all diff --git a/Documentation/conf.py b/Documentation/conf.py index 3b2397bcb565..a8fe845832bc 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -16,6 +16,8 @@ import sys import os import sphinx +from subprocess import check_output + # Get Sphinx version major, minor, patch = sphinx.version_info[:3] @@ -276,10 +278,21 @@ latex_elements = { \\setsansfont{DejaVu Sans} \\setromanfont{DejaVu Serif} \\setmonofont{DejaVu Sans Mono} - ''' } +# At least one book (translations) may have Asian characters +# with are only displayed if xeCJK is used + +cjk_cmd = check_output(['fc-list', '--format="%{family[0]}\n"']).decode('utf-8', 'ignore') +if cjk_cmd.find("Noto Sans CJK SC") >= 0: + print ("enabling CJK for LaTeX builder") + latex_elements['preamble'] += ''' + % This is needed for translations + \\usepackage{xeCJK} + \\setCJKmainfont{Noto Sans CJK SC} + ''' + # Fix reference escape troubles with Sphinx 1.4.x if major == 1 and minor > 3: latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n' @@ -410,6 +423,21 @@ latex_documents = [ 'The kernel development community', 'manual'), ] +# Add all other index files from Documentation/ subdirectories +for fn in os.listdir('.'): + doc = os.path.join(fn, "index") + if os.path.exists(doc + ".rst"): + has = False + for l in latex_documents: + if l[0] == doc: + has = True + break + if not has: + latex_documents.append((doc, fn + '.tex', + 'Linux %s Documentation' % fn.capitalize(), + 'The kernel development community', + 'manual')) + # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None diff --git a/Documentation/core-api/conf.py b/Documentation/core-api/conf.py deleted file mode 100644 index db1f7659f3da..000000000000 --- a/Documentation/core-api/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Core-API Documentation" - -tags.add("subproject") - -latex_documents = [ - ('index', 'core-api.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/crypto/conf.py b/Documentation/crypto/conf.py deleted file mode 100644 index 4335d251ddf3..000000000000 --- a/Documentation/crypto/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = 'Linux Kernel Crypto API' - -tags.add("subproject") - -latex_documents = [ - ('index', 'crypto-api.tex', 'Linux Kernel Crypto API manual', - 'The kernel development community', 'manual'), -] diff --git a/Documentation/dev-tools/conf.py b/Documentation/dev-tools/conf.py deleted file mode 100644 index 7faafa3f7888..000000000000 --- a/Documentation/dev-tools/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Development tools for the kernel" - -tags.add("subproject") - -latex_documents = [ - ('index', 'dev-tools.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt index 326f29b270ad..2d325bed37e5 100644 --- a/Documentation/devicetree/bindings/arm/idle-states.txt +++ b/Documentation/devicetree/bindings/arm/idle-states.txt @@ -703,4 +703,4 @@ cpus { https://www.devicetree.org/specifications/ [6] ARM Linux Kernel documentation - Booting AArch64 Linux - Documentation/arm64/booting.txt + Documentation/arm64/booting.rst diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml index 08c923f8c257..28eb458f761a 100644 --- a/Documentation/devicetree/bindings/arm/renesas.yaml +++ b/Documentation/devicetree/bindings/arm/renesas.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/arm/shmobile.yaml# +$id: http://devicetree.org/schemas/arm/renesas.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings diff --git a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml index aae53fc3cb1e..2bd519d2e855 100644 --- a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml +++ b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/arm/milbeaut.yaml# +$id: http://devicetree.org/schemas/arm/socionext/milbeaut.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Milbeaut platforms device tree bindings diff --git a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml index 4326d2cfa15d..a8765ba29476 100644 --- a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml +++ b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/arm/ti/davinci.yaml# +$id: http://devicetree.org/schemas/arm/ti/ti,davinci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Texas Instruments DaVinci Platforms Device Tree 
Bindings diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml index c935405458fe..fa4d143a73de 100644 --- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml +++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/phy/allwinner,sun4i-a10-ccu.yaml# +$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-ccu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Allwinner Clock Control Unit Device Tree Bindings diff --git a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml index 8cb136c376fb..4f0db8ee226a 100644 --- a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml +++ b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml @@ -2,7 +2,7 @@ # Copyright 2019 Linaro Ltd. %YAML 1.2 --- -$id: "http://devicetree.org/schemas/firmware/intel-ixp4xx-network-processing-engine.yaml#" +$id: "http://devicetree.org/schemas/firmware/intel,ixp4xx-network-processing-engine.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" title: Intel IXP4xx Network Processing Engine diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml index 7ba167e2e1ea..c602b6fe1c0c 100644 --- a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml +++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/iio/accelerometers/adi,adxl345.yaml# +$id: http://devicetree.org/schemas/iio/accel/adi,adxl345.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Analog Devices ADXL345/ADXL375 3-Axis Digital Accelerometers diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml index a7fafb9bf5c6..e7daffec88d3 100644 --- a/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml +++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/iio/accelerometers/adi,adxl372.yaml# +$id: http://devicetree.org/schemas/iio/accel/adi,adxl372.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Analog Devices ADXL372 3-Axis, +/-(200g) Digital Accelerometer diff --git a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt index 4e82fd575cec..c676b03c752e 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt @@ -5,21 +5,19 @@ Required properties: - compatible: should be "amazon,al-fic" - reg: physical base address and size of the registers - interrupt-controller: identifies the node as an interrupt controller -- #interrupt-cells: must be 2. - First cell defines the index of the interrupt within the controller. 
- Second cell is used to specify the trigger type and must be one of the - following: - - bits[3:0] trigger type and level flags - 1 = low-to-high edge triggered - 4 = active high level-sensitive -- interrupt-parent: specifies the parent interrupt controller. +- #interrupt-cells : must be 2. Specifies the number of cells needed to encode + an interrupt source. Supported trigger types are low-to-high edge + triggered and active high level-sensitive. - interrupts: describes which input line in the interrupt parent, this fic's output is connected to. This field property depends on the parent's binding +Please refer to interrupts.txt in this directory for details of the common +Interrupt Controllers bindings used by client devices. + Example: -amazon_fic: interrupt-controller@0xfd8a8500 { +amazon_fic: interrupt-controller@fd8a8500 { compatible = "amazon,al-fic"; interrupt-controller; #interrupt-cells = <2>; diff --git a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml index bae10e261fa9..507c141ea760 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml @@ -2,7 +2,7 @@ # Copyright 2018 Linaro Ltd. %YAML 1.2 --- -$id: "http://devicetree.org/schemas/interrupt/intel-ixp4xx-interrupt.yaml#" +$id: "http://devicetree.org/schemas/interrupt-controller/intel,ixp4xx-interrupt.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" title: Intel IXP4xx XScale Networking Processors Interrupt Controller diff --git a/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml index d2313b1d9405..0ea21a6f70b4 100644 --- a/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml +++ b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml @@ -2,7 +2,7 @@ # Copyright 2019 Linaro Ltd. 
%YAML 1.2 --- -$id: "http://devicetree.org/schemas/misc/intel-ixp4xx-ahb-queue-manager.yaml#" +$id: "http://devicetree.org/schemas/misc/intel,ixp4xx-ahb-queue-manager.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" title: Intel IXP4xx AHB Queue Manager diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml index d4084c149768..3fb0714e761e 100644 --- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml +++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/net/allwinner,sun8i-a83t-gmac.yaml# +$id: http://devicetree.org/schemas/net/allwinner,sun8i-a83t-emac.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Allwinner A83t EMAC Device Tree Bindings diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml index c9efd6e2c134..1084e9d2917d 100644 --- a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml +++ b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml @@ -37,13 +37,13 @@ required: examples: - | - sid@1c23800 { + efuse@1c23800 { compatible = "allwinner,sun4i-a10-sid"; reg = <0x01c23800 0x10>; }; - | - sid@1c23800 { + efuse@1c23800 { compatible = "allwinner,sun7i-a20-sid"; reg = <0x01c23800 0x200>; }; diff --git a/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml b/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml new file mode 100644 index 000000000000..b7c00ed31085 --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/nvmem/nvmem-consumer.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVMEM (Non Volatile Memory) Consumer Device Tree Bindings + +maintainers: + - Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + +select: true + +properties: + nvmem: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: + List of phandle to the nvmem providers. + + nvmem-cells: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: + List of phandle to the nvmem data cells. + + nvmem-names: + $ref: /schemas/types.yaml#/definitions/string-array + description: + Names for the each nvmem provider. + + nvmem-cell-names: + $ref: /schemas/types.yaml#/definitions/string-array + description: + Names for each nvmem-cells specified. + +dependencies: + nvmem-names: [ nvmem ] + nvmem-cell-names: [ nvmem-cells ] + +examples: + - | + tsens { + /* ... */ + nvmem-cells = <&tsens_calibration>; + nvmem-cell-names = "calibration"; + }; diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.txt b/Documentation/devicetree/bindings/nvmem/nvmem.txt index fd06c09b822b..46a7ef485e24 100644 --- a/Documentation/devicetree/bindings/nvmem/nvmem.txt +++ b/Documentation/devicetree/bindings/nvmem/nvmem.txt @@ -1,80 +1 @@ -= NVMEM(Non Volatile Memory) Data Device Tree Bindings = - -This binding is intended to represent the location of hardware -configuration data stored in NVMEMs like eeprom, efuses and so on. - -On a significant proportion of boards, the manufacturer has stored -some data on NVMEM, for the OS to be able to retrieve these information -and act upon it. 
Obviously, the OS has to know about where to retrieve -these data from, and where they are stored on the storage device. - -This document is here to document this. - -= Data providers = -Contains bindings specific to provider drivers and data cells as children -of this node. - -Optional properties: - read-only: Mark the provider as read only. - -= Data cells = -These are the child nodes of the provider which contain data cell -information like offset and size in nvmem provider. - -Required properties: -reg: specifies the offset in byte within the storage device. - -Optional properties: - -bits: Is pair of bit location and number of bits, which specifies offset - in bit and number of bits within the address range specified by reg property. - Offset takes values from 0-7. - -For example: - - /* Provider */ - qfprom: qfprom@700000 { - ... - - /* Data cells */ - tsens_calibration: calib@404 { - reg = <0x404 0x10>; - }; - - tsens_calibration_bckp: calib_bckp@504 { - reg = <0x504 0x11>; - bits = <6 128> - }; - - pvs_version: pvs-version@6 { - reg = <0x6 0x2> - bits = <7 2> - }; - - speed_bin: speed-bin@c{ - reg = <0xc 0x1>; - bits = <2 3>; - - }; - ... - }; - -= Data consumers = -Are device nodes which consume nvmem data cells/providers. - -Required-properties: -nvmem-cells: list of phandle to the nvmem data cells. -nvmem-cell-names: names for the each nvmem-cells specified. Required if - nvmem-cells is used. - -Optional-properties: -nvmem : list of phandles to nvmem providers. -nvmem-names: names for the each nvmem provider. required if nvmem is used. - -For example: - - tsens { - ... - nvmem-cells = <&tsens_calibration>; - nvmem-cell-names = "calibration"; - }; +This file has been moved to nvmem.yaml and nvmem-consumer.yaml. diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml new file mode 100644 index 000000000000..1c75a059206c --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/nvmem/nvmem.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVMEM (Non Volatile Memory) Device Tree Bindings + +maintainers: + - Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + +description: | + This binding is intended to represent the location of hardware + configuration data stored in NVMEMs like eeprom, efuses and so on. + + On a significant proportion of boards, the manufacturer has stored + some data on NVMEM, for the OS to be able to retrieve these + information and act upon it. Obviously, the OS has to know about + where to retrieve these data from, and where they are stored on the + storage device. + +properties: + $nodename: + pattern: "^(eeprom|efuse|nvram)(@.*|-[0-9a-f])*$" + + "#address-cells": + const: 1 + + "#size-cells": + const: 1 + + read-only: + $ref: /schemas/types.yaml#/definitions/flag + description: + Mark the provider as read only. + +patternProperties: + "^.*@[0-9a-f]+$": + type: object + + properties: + reg: + maxItems: 1 + description: + Offset and size in bytes within the storage device. + + bits: + maxItems: 1 + items: + items: + - minimum: 0 + maximum: 7 + description: + Offset in bit within the address range specified by reg. + - minimum: 1 + description: + Size in bit within the address range specified by reg. 
+ + required: + - reg + + additionalProperties: false + +examples: + - | + qfprom: eeprom@700000 { + #address-cells = <1>; + #size-cells = <1>; + + /* ... */ + + /* Data cells */ + tsens_calibration: calib@404 { + reg = <0x404 0x10>; + }; + + tsens_calibration_bckp: calib_bckp@504 { + reg = <0x504 0x11>; + bits = <6 128>; + }; + + pvs_version: pvs-version@6 { + reg = <0x6 0x2>; + bits = <7 2>; + }; + + speed_bin: speed-bin@c{ + reg = <0xc 0x1>; + bits = <2 3>; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml index 250f9d5aabdf..fa46670de299 100644 --- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml +++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- -$id: http://devicetree.org/schemas/display/allwinner,sun6i-a31-mipi-dphy.yaml# +$id: http://devicetree.org/schemas/phy/allwinner,sun6i-a31-mipi-dphy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Allwinner A31 MIPI D-PHY Controller Device Tree Bindings diff --git a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml index a36a0746c056..2807225db902 100644 --- a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml +++ b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml @@ -2,7 +2,7 @@ # Copyright 2018 Linaro Ltd. %YAML 1.2 --- -$id: "http://devicetree.org/schemas/timer/intel-ixp4xx-timer.yaml#" +$id: "http://devicetree.org/schemas/timer/intel,ixp4xx-timer.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" title: Intel IXP4xx XScale Networking Processors Timers diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt index bc7945e9dbfe..17915f64b8ee 100644 --- a/Documentation/devicetree/bindings/usb/usb251xb.txt +++ b/Documentation/devicetree/bindings/usb/usb251xb.txt @@ -64,10 +64,8 @@ Optional properties : - power-on-time-ms : Specifies the time it takes from the time the host initiates the power-on sequence to a port until the port has adequate power. The value is given in ms in a 0 - 510 range (default is 100ms). - - swap-dx-lanes : Specifies the downstream ports which will swap the - differential-pair (D+/D-), default is not-swapped. - - swap-us-lanes : Selects the upstream port differential-pair (D+/D-) - swapping (boolean, default is not-swapped) + - swap-dx-lanes : Specifies the ports which will swap the differential-pair + (D+/D-), default is not-swapped. 
Examples: usb2512b@2c { diff --git a/Documentation/doc-guide/conf.py b/Documentation/doc-guide/conf.py deleted file mode 100644 index fd3731182d5a..000000000000 --- a/Documentation/doc-guide/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = 'Linux Kernel Documentation Guide' - -tags.add("subproject") - -latex_documents = [ - ('index', 'kernel-doc-guide.tex', 'Linux Kernel Documentation Guide', - 'The kernel development community', 'manual'), -] diff --git a/Documentation/driver-api/80211/conf.py b/Documentation/driver-api/80211/conf.py deleted file mode 100644 index 4424b4b0b9c3..000000000000 --- a/Documentation/driver-api/80211/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Linux 802.11 Driver Developer's Guide" - -tags.add("subproject") - -latex_documents = [ - ('index', '80211.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/driver-api/conf.py b/Documentation/driver-api/conf.py deleted file mode 100644 index 202726d20088..000000000000 --- a/Documentation/driver-api/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "The Linux driver implementer's API guide" - -tags.add("subproject") - -latex_documents = [ - ('index', 'driver-api.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst index 0c161b1a3be6..8382f01a53e3 100644 --- a/Documentation/driver-api/generic-counter.rst +++ b/Documentation/driver-api/generic-counter.rst @@ -233,7 +233,7 @@ Userspace Interface Several sysfs attributes are generated by the Generic Counter interface, and reside under the /sys/bus/counter/devices/counterX directory, where counterX refers to the respective counter device. Please see -Documentation/ABI/testing/sys-bus-counter-generic-sysfs for detailed +Documentation/ABI/testing/sysfs-bus-counter for detailed information on each Generic Counter interface sysfs attribute. Through these sysfs attributes, programs and scripts may interact with @@ -325,7 +325,7 @@ sysfs attributes, where Y is the unique ID of the respective Count: For a more detailed breakdown of the available Generic Counter interface sysfs attributes, please refer to the -Documentation/ABI/testing/sys-bus-counter file. +Documentation/ABI/testing/sysfs-bus-counter file. The Signals and Counts associated with the Counter device are registered to the system as well by the counter_register function. The diff --git a/Documentation/driver-api/phy/phy.rst b/Documentation/driver-api/phy/phy.rst index 457c3e0f86d6..8fc1ce0bb905 100644 --- a/Documentation/driver-api/phy/phy.rst +++ b/Documentation/driver-api/phy/phy.rst @@ -179,8 +179,8 @@ PHY Mappings In order to get reference to a PHY without help from DeviceTree, the framework offers lookups which can be compared to clkdev that allow clk structures to be -bound to devices. A lookup can be made be made during runtime when a handle to -the struct phy already exists. +bound to devices. A lookup can be made during runtime when a handle to the +struct phy already exists. 
The framework offers the following API for registering and unregistering the lookups:: diff --git a/Documentation/driver-api/pm/conf.py b/Documentation/driver-api/pm/conf.py deleted file mode 100644 index a89fac11272f..000000000000 --- a/Documentation/driver-api/pm/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Device Power Management" - -tags.add("subproject") - -latex_documents = [ - ('index', 'pm.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/filesystems/conf.py b/Documentation/filesystems/conf.py deleted file mode 100644 index ea44172af5c4..000000000000 --- a/Documentation/filesystems/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Linux Filesystems API" - -tags.add("subproject") - -latex_documents = [ - ('index', 'filesystems.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/gpu/conf.py b/Documentation/gpu/conf.py deleted file mode 100644 index 1757b040fb32..000000000000 --- a/Documentation/gpu/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Linux GPU Driver Developer's Guide" - -tags.add("subproject") - -latex_documents = [ - ('index', 'gpu.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index c38ef0dda605..0e322688be5c 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -430,31 +430,31 @@ WOPCM Layout GuC === +Firmware Layout +------------------- + +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h + :doc: Firmware Layout + GuC-specific firmware loader ---------------------------- -.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c :internal: GuC-based command submission ---------------------------- -.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_submission.c +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c :doc: GuC-based command submission -.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_submission.c +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c :internal: -GuC Firmware Layout -------------------- - -.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fwif.h - :doc: GuC Firmware Layout - GuC Address Space ----------------- -.. kernel-doc:: drivers/gpu/drm/i915/intel_guc.c +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c :doc: GuC Address Space Tracing diff --git a/Documentation/hwmon/k8temp.rst b/Documentation/hwmon/k8temp.rst index 72da12aa17e5..fe9109521056 100644 --- a/Documentation/hwmon/k8temp.rst +++ b/Documentation/hwmon/k8temp.rst @@ -9,7 +9,7 @@ Supported chips: Addresses scanned: PCI space - Datasheet: http://support.amd.com/us/Processor_TechDocs/32559.pdf + Datasheet: http://www.amd.com/system/files/TechDocs/32559.pdf Author: Rudolf Marek diff --git a/Documentation/index.rst b/Documentation/index.rst index 70ae148ec980..2df5a3da563c 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -111,9 +111,11 @@ needed). netlabel/index networking/index pcmcia/index + power/index target/index timers/index watchdog/index + virtual/index input/index hwmon/index gpu/index @@ -143,6 +145,7 @@ implementation. 
arm64/index ia64/index m68k/index + powerpc/index riscv/index s390/index sh/index diff --git a/Documentation/input/conf.py b/Documentation/input/conf.py deleted file mode 100644 index d2352fdc92ed..000000000000 --- a/Documentation/input/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "The Linux input driver subsystem" - -tags.add("subproject") - -latex_documents = [ - ('index', 'linux-input.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/kernel-hacking/conf.py b/Documentation/kernel-hacking/conf.py deleted file mode 100644 index 3d8acf0f33ad..000000000000 --- a/Documentation/kernel-hacking/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Kernel Hacking Guides" - -tags.add("subproject") - -latex_documents = [ - ('index', 'kernel-hacking.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/locking/spinlocks.rst b/Documentation/locking/spinlocks.rst index 098107fb7d86..e93ec6645238 100644 --- a/Documentation/locking/spinlocks.rst +++ b/Documentation/locking/spinlocks.rst @@ -82,7 +82,7 @@ itself. The read lock allows many concurrent readers. Anything that **changes** the list will have to get the write lock. NOTE! RCU is better for list traversal, but requires careful - attention to design detail (see Documentation/RCU/listRCU.txt). + attention to design detail (see Documentation/RCU/listRCU.rst). Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_ time need to do any changes (even if you don't do it every time), you have @@ -90,7 +90,7 @@ to get the write-lock at the very beginning. NOTE! We are working hard to remove reader-writer spinlocks in most cases, so please don't add a new one without consensus. (Instead, see - Documentation/RCU/rcu.txt for complete information.) + Documentation/RCU/rcu.rst for complete information.) 
---- diff --git a/Documentation/maintainer/conf.py b/Documentation/maintainer/conf.py deleted file mode 100644 index 81e9eb7a7884..000000000000 --- a/Documentation/maintainer/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = 'Linux Kernel Development Documentation' - -tags.add("subproject") - -latex_documents = [ - ('index', 'maintainer.tex', 'Linux Kernel Development Documentation', - 'The kernel development community', 'manual'), -] diff --git a/Documentation/media/conf.py b/Documentation/media/conf.py deleted file mode 100644 index 1f194fcd2cae..000000000000 --- a/Documentation/media/conf.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -# SPDX-License-Identifier: GPL-2.0 - -project = 'Linux Media Subsystem Documentation' - -tags.add("subproject") - -latex_documents = [ - ('index', 'media.tex', 'Linux Media Subsystem Documentation', - 'The kernel development community', 'manual'), -] diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 045bb8148fe9..1adbb8a371c7 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -548,7 +548,7 @@ There are certain things that the Linux kernel memory barriers do not guarantee: [*] For information on bus mastering DMA and coherency please read: - Documentation/PCI/pci.rst + Documentation/driver-api/pci/pci.rst Documentation/DMA-API-HOWTO.txt Documentation/DMA-API.txt diff --git a/Documentation/networking/conf.py b/Documentation/networking/conf.py deleted file mode 100644 index 40f69e67a883..000000000000 --- a/Documentation/networking/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Linux Networking Documentation" - -tags.add("subproject") - -latex_documents = [ - ('index', 'networking.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/power/index.rst b/Documentation/power/index.rst index 20415f21e48a..002e42745263 100644 --- a/Documentation/power/index.rst +++ b/Documentation/power/index.rst @@ -1,4 +1,4 @@ -:orphan: +.. SPDX-License-Identifier: GPL-2.0 ================ Power Management diff --git a/Documentation/powerpc/bootwrapper.txt b/Documentation/powerpc/bootwrapper.rst index d60fced5e1cc..a6292afba573 100644 --- a/Documentation/powerpc/bootwrapper.txt +++ b/Documentation/powerpc/bootwrapper.rst @@ -1,5 +1,7 @@ +======================== The PowerPC boot wrapper ------------------------- +======================== + Copyright (C) Secret Lab Technologies Ltd. PowerPC image targets compresses and wraps the kernel image (vmlinux) with @@ -21,6 +23,7 @@ it uses the wrapper script (arch/powerpc/boot/wrapper) to generate target image. The details of the build system is discussed in the next section. Currently, the following image format targets exist: + ==================== ======================================================== cuImage.%: Backwards compatible uImage for older version of U-Boot (for versions that don't understand the device tree). This image embeds a device tree blob inside @@ -29,31 +32,36 @@ Currently, the following image format targets exist: with boot wrapper code that extracts data from the old bd_info structure and loads the data into the device tree before jumping into the kernel. - Because of the series of #ifdefs found in the + + Because of the series of #ifdefs found in the bd_info structure used in the old U-Boot interfaces, cuImages are platform specific. 
Each specific U-Boot platform has a different platform init file which populates the embedded device tree with data from the platform specific bd_info file. The platform specific cuImage platform init code can be found in - arch/powerpc/boot/cuboot.*.c. Selection of the correct + `arch/powerpc/boot/cuboot.*.c`. Selection of the correct cuImage init code for a specific board can be found in the wrapper structure. + dtbImage.%: Similar to zImage, except device tree blob is embedded inside the image instead of provided by firmware. The output image file can be either an elf file or a flat binary depending on the platform. - dtbImages are used on systems which do not have an + + dtbImages are used on systems which do not have an interface for passing a device tree directly. dtbImages are similar to simpleImages except that dtbImages have platform specific code for extracting data from the board firmware, but simpleImages do not talk to the firmware at all. - PlayStation 3 support uses dtbImage. So do Embedded + + PlayStation 3 support uses dtbImage. So do Embedded Planet boards using the PlanetCore firmware. Board specific initialization code is typically found in a file named arch/powerpc/boot/<platform>.c; but this can be overridden by the wrapper script. + simpleImage.%: Firmware independent compressed image that does not depend on any particular firmware interface and embeds a device tree blob. This image is a flat binary that @@ -61,14 +69,16 @@ Currently, the following image format targets exist: Firmware cannot pass any configuration data to the kernel with this image type and it depends entirely on the embedded device tree for all information. - The simpleImage is useful for booting systems with + + The simpleImage is useful for booting systems with an unknown firmware interface or for booting from a debugger when no firmware is present (such as on the Xilinx Virtex platform). The only assumption that simpleImage makes is that RAM is correctly initialized and that the MMU is either off or has RAM mapped to base address 0. - simpleImage also supports inserting special platform + + simpleImage also supports inserting special platform specific initialization code to the start of the bootup sequence. The virtex405 platform uses this feature to ensure that the cache is invalidated before caching @@ -81,9 +91,11 @@ Currently, the following image format targets exist: named (virtex405-<board>.dts). Search the wrapper script for 'virtex405' and see the file arch/powerpc/boot/virtex405-head.S for details. + treeImage.%; Image format for used with OpenBIOS firmware found on some ppc4xx hardware. This image embeds a device tree blob inside the image. + uImage: Native image format used by U-Boot. The uImage target does not add any boot code. It just wraps a compressed vmlinux in the uImage data structure. This image @@ -91,12 +103,14 @@ Currently, the following image format targets exist: a device tree to the kernel at boot. If using an older version of U-Boot, then you need to use a cuImage instead. + zImage.%: Image format which does not embed a device tree. Used by OpenFirmware and other firmware interfaces which are able to supply a device tree. This image expects firmware to provide the device tree at boot. Typically, if you have general purpose PowerPC hardware then you want this image format. 
+ ==================== ======================================================== Image types which embed a device tree blob (simpleImage, dtbImage, treeImage, and cuImage) all generate the device tree blob from a file in the diff --git a/Documentation/powerpc/cpu_families.txt b/Documentation/powerpc/cpu_families.rst index fc08e22feb1a..1e063c5440c3 100644 --- a/Documentation/powerpc/cpu_families.txt +++ b/Documentation/powerpc/cpu_families.rst @@ -1,3 +1,4 @@ +============ CPU Families ============ @@ -8,8 +9,8 @@ and are supported by arch/powerpc. Book3S (aka sPAPR) ------------------ - - Hash MMU - - Mix of 32 & 64 bit +- Hash MMU +- Mix of 32 & 64 bit:: +--------------+ +----------------+ | Old POWER | --------------> | RS64 (threads) | @@ -108,8 +109,8 @@ Book3S (aka sPAPR) IBM BookE --------- - - Software loaded TLB. - - All 32 bit +- Software loaded TLB. +- All 32 bit:: +--------------+ | 401 | @@ -155,8 +156,8 @@ IBM BookE Motorola/Freescale 8xx ---------------------- - - Software loaded with hardware assist. - - All 32 bit +- Software loaded with hardware assist. +- All 32 bit:: +-------------+ | MPC8xx Core | @@ -166,9 +167,9 @@ Motorola/Freescale 8xx Freescale BookE --------------- - - Software loaded TLB. - - e6500 adds HW loaded indirect TLB entries. - - Mix of 32 & 64 bit +- Software loaded TLB. +- e6500 adds HW loaded indirect TLB entries. +- Mix of 32 & 64 bit:: +--------------+ | e200 | @@ -207,8 +208,8 @@ Freescale BookE IBM A2 core ----------- - - Book3E, software loaded TLB + HW loaded indirect TLB entries. - - 64 bit +- Book3E, software loaded TLB + HW loaded indirect TLB entries. +- 64 bit:: +--------------+ +----------------+ | A2 core | --> | WSP | diff --git a/Documentation/powerpc/cpu_features.txt b/Documentation/powerpc/cpu_features.rst index ae09df8722c8..b7bcdd2f41bb 100644 --- a/Documentation/powerpc/cpu_features.txt +++ b/Documentation/powerpc/cpu_features.rst @@ -1,3 +1,7 @@ +============ +CPU Features +============ + Hollis Blanchard <hollis@austin.ibm.com> 5 Jun 2002 @@ -32,7 +36,7 @@ anyways). After detecting the processor type, the kernel patches out sections of code that shouldn't be used by writing nop's over it. Using cpufeatures requires just 2 macros (found in arch/powerpc/include/asm/cputable.h), as seen in head.S -transfer_to_handler: +transfer_to_handler:: #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.rst index c5e8d5098ed3..920546d81326 100644 --- a/Documentation/powerpc/cxl.txt +++ b/Documentation/powerpc/cxl.rst @@ -1,3 +1,4 @@ +==================================== Coherent Accelerator Interface (CXL) ==================================== @@ -21,6 +22,8 @@ Introduction Hardware overview ================= + :: + POWER8/9 FPGA +----------+ +---------+ | | | | @@ -59,14 +62,16 @@ Hardware overview the fault. The context to which this fault is serviced is based on who owns that acceleration function. - POWER8 <-----> PSL Version 8 is compliant to the CAIA Version 1.0. - POWER9 <-----> PSL Version 9 is compliant to the CAIA Version 2.0. + - POWER8 and PSL Version 8 are compliant to the CAIA Version 1.0. + - POWER9 and PSL Version 9 are compliant to the CAIA Version 2.0. + This PSL Version 9 provides new features such as: + * Interaction with the nest MMU on the P9 chip. * Native DMA support. * Supports sending ASB_Notify messages for host thread wakeup. * Supports Atomic operations. - * .... + * etc. 
Cards with a PSL9 won't work on a POWER8 system and cards with a PSL8 won't work on a POWER9 system. @@ -147,7 +152,9 @@ User API master devices. A userspace library libcxl is available here: + https://github.com/ibm-capi/libcxl + This provides a C interface to this kernel API. open @@ -165,7 +172,8 @@ open When all available contexts are allocated the open call will fail and return -ENOSPC. - Note: IRQs need to be allocated for each context, which may limit + Note: + IRQs need to be allocated for each context, which may limit the number of contexts that can be created, and therefore how many times the device can be opened. The POWER8 CAPP supports 2040 IRQs and 3 are used by the kernel, so 2037 are @@ -186,7 +194,9 @@ ioctl updated as userspace allocates and frees memory. This ioctl returns once the AFU context is started. - Takes a pointer to a struct cxl_ioctl_start_work: + Takes a pointer to a struct cxl_ioctl_start_work + + :: struct cxl_ioctl_start_work { __u64 flags; @@ -269,7 +279,7 @@ read The buffer passed to read() must be at least 4K bytes. The result of the read will be a buffer of one or more events, - each event is of type struct cxl_event, of varying size. + each event is of type struct cxl_event, of varying size:: struct cxl_event { struct cxl_event_header header; @@ -280,7 +290,9 @@ read }; }; - The struct cxl_event_header is defined as: + The struct cxl_event_header is defined as + + :: struct cxl_event_header { __u16 type; @@ -307,7 +319,9 @@ read For future extensions and padding. If the event type is CXL_EVENT_AFU_INTERRUPT then the event - structure is defined as: + structure is defined as + + :: struct cxl_event_afu_interrupt { __u16 flags; @@ -326,7 +340,9 @@ read For future extensions and padding. If the event type is CXL_EVENT_DATA_STORAGE then the event - structure is defined as: + structure is defined as + + :: struct cxl_event_data_storage { __u16 flags; @@ -356,7 +372,9 @@ read For future extensions If the event type is CXL_EVENT_AFU_ERROR then the event structure - is defined as: + is defined as + + :: struct cxl_event_afu_error { __u16 flags; @@ -393,15 +411,15 @@ open ioctl ----- -CXL_IOCTL_DOWNLOAD_IMAGE: -CXL_IOCTL_VALIDATE_IMAGE: +CXL_IOCTL_DOWNLOAD_IMAGE / CXL_IOCTL_VALIDATE_IMAGE: Starts and controls flashing a new FPGA image. Partial reconfiguration is not supported (yet), so the image must contain a copy of the PSL and AFU(s). Since an image can be quite large, the caller may have to iterate, splitting the image in smaller chunks. 
- Takes a pointer to a struct cxl_adapter_image: + Takes a pointer to a struct cxl_adapter_image:: + struct cxl_adapter_image { __u64 flags; __u64 data; @@ -442,7 +460,7 @@ Udev rules The following udev rules could be used to create a symlink to the most logical chardev to use in any programming mode (afuX.Yd for dedicated, afuX.Ys for afu directed), since the API is virtually - identical for each: + identical for each:: SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b" SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \ diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.rst index a64bdaa0a1cf..cea67931b3b9 100644 --- a/Documentation/powerpc/cxlflash.txt +++ b/Documentation/powerpc/cxlflash.rst @@ -1,3 +1,7 @@ +================================ +Coherent Accelerator (CXL) Flash +================================ + Introduction ============ @@ -28,7 +32,7 @@ Introduction responsible for the initialization of the adapter, setting up the special path for user space access, and performing error recovery. It communicates directly the Flash Accelerator Functional Unit (AFU) - as described in Documentation/powerpc/cxl.txt. + as described in Documentation/powerpc/cxl.rst. The cxlflash driver supports two, mutually exclusive, modes of operation at the device (LUN) level: @@ -58,7 +62,7 @@ Overview The CXL Flash Adapter Driver establishes a master context with the AFU. It uses memory mapped I/O (MMIO) for this control and setup. The - Adapter Problem Space Memory Map looks like this: + Adapter Problem Space Memory Map looks like this:: +-------------------------------+ | 512 * 64 KB User MMIO | @@ -375,7 +379,7 @@ CXL Flash Driver Host IOCTLs Each host adapter instance that is supported by the cxlflash driver has a special character device associated with it to enable a set of host management function. These character devices are hosted in a - class dedicated for cxlflash and can be accessed via /dev/cxlflash/*. + class dedicated for cxlflash and can be accessed via `/dev/cxlflash/*`. Applications can be written to perform various functions using the host ioctl APIs below. diff --git a/Documentation/powerpc/DAWR-POWER9.txt b/Documentation/powerpc/dawr-power9.rst index ecdbb076438c..c96ab6befd9c 100644 --- a/Documentation/powerpc/DAWR-POWER9.txt +++ b/Documentation/powerpc/dawr-power9.rst @@ -1,10 +1,11 @@ +===================== DAWR issues on POWER9 -============================ +===================== On POWER9 the Data Address Watchpoint Register (DAWR) can cause a checkstop if it points to cache inhibited (CI) memory. Currently Linux has no way to disinguish CI memory when configuring the DAWR, so (for now) the DAWR is -disabled by this commit: +disabled by this commit:: commit 9654153158d3e0684a1bdb76dbababdb7111d5a0 Author: Michael Neuling <mikey@neuling.org> @@ -12,7 +13,7 @@ disabled by this commit: powerpc: Disable DAWR in the base POWER9 CPU features Technical Details: -============================ +================== DAWR has 6 different ways of being set. 1) ptrace @@ -37,7 +38,7 @@ DAWR on the migration. For xmon, the 'bd' command will return an error on P9. Consequences for users -============================ +====================== For GDB watchpoints (ie 'watch' command) on POWER9 bare metal , GDB will accept the command. Unfortunately since there is no hardware @@ -57,8 +58,8 @@ trapped in GDB. The watchpoint is remembered, so if the guest is migrated back to the POWER8 host, it will start working again. 
Force enabling the DAWR -============================= -Kernels (since ~v5.2) have an option to force enable the DAWR via: +======================= +Kernels (since ~v5.2) have an option to force enable the DAWR via:: echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous @@ -86,5 +87,7 @@ dawr_enable_dangerous file will fail if the hypervisor doesn't support writing the DAWR. To double check the DAWR is working, run this kernel selftest: + tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c + Any errors/failures/skips mean something is wrong. diff --git a/Documentation/powerpc/dscr.txt b/Documentation/powerpc/dscr.rst index ece300c64f76..2ab99006014c 100644 --- a/Documentation/powerpc/dscr.txt +++ b/Documentation/powerpc/dscr.rst @@ -1,5 +1,6 @@ - DSCR (Data Stream Control Register) - ================================================ +=================================== +DSCR (Data Stream Control Register) +=================================== DSCR register in powerpc allows user to have some control of prefetch of data stream in the processor. Please refer to the ISA documents or related manual @@ -10,14 +11,17 @@ user interface. (A) Data Structures: - (1) thread_struct: + (1) thread_struct:: + dscr /* Thread DSCR value */ dscr_inherit /* Thread has changed default DSCR */ - (2) PACA: + (2) PACA:: + dscr_default /* per-CPU DSCR default value */ - (3) sysfs.c: + (3) sysfs.c:: + dscr_default /* System DSCR default value */ (B) Scheduler Changes: @@ -35,8 +39,8 @@ user interface. (C) SYSFS Interface: - Global DSCR default: /sys/devices/system/cpu/dscr_default - CPU specific DSCR default: /sys/devices/system/cpu/cpuN/dscr + - Global DSCR default: /sys/devices/system/cpu/dscr_default + - CPU specific DSCR default: /sys/devices/system/cpu/cpuN/dscr Changing the global DSCR default in the sysfs will change all the CPU specific DSCR defaults immediately in their PACA structures. Again if diff --git a/Documentation/powerpc/eeh-pci-error-recovery.txt b/Documentation/powerpc/eeh-pci-error-recovery.rst index 678189280bb4..438a87ebc095 100644 --- a/Documentation/powerpc/eeh-pci-error-recovery.txt +++ b/Documentation/powerpc/eeh-pci-error-recovery.rst @@ -1,10 +1,10 @@ +========================== +PCI Bus EEH Error Recovery +========================== +Linas Vepstas <linas@austin.ibm.com> - PCI Bus EEH Error Recovery - -------------------------- - Linas Vepstas - <linas@austin.ibm.com> - 12 January 2005 +12 January 2005 Overview: @@ -143,17 +143,17 @@ seen in /proc/ppc64/eeh (subject to change). Normally, almost all of these occur during boot, when the PCI bus is scanned, where a large number of 0xff reads are part of the bus scan procedure. -If a frozen slot is detected, code in -arch/powerpc/platforms/pseries/eeh.c will print a stack trace to -syslog (/var/log/messages). This stack trace has proven to be very -useful to device-driver authors for finding out at what point the EEH -error was detected, as the error itself usually occurs slightly +If a frozen slot is detected, code in +arch/powerpc/platforms/pseries/eeh.c will print a stack trace to +syslog (/var/log/messages). This stack trace has proven to be very +useful to device-driver authors for finding out at what point the EEH +error was detected, as the error itself usually occurs slightly beforehand. Next, it uses the Linux kernel notifier chain/work queue mechanism to allow any interested parties to find out about the failure. 
Device drivers, or other parts of the kernel, can use -eeh_register_notifier(struct notifier_block *) to find out about EEH +`eeh_register_notifier(struct notifier_block *)` to find out about EEH events. The event will include a pointer to the pci device, the device node and some state info. Receivers of the event can "do as they wish"; the default handler will be described further in this @@ -162,10 +162,13 @@ section. To assist in the recovery of the device, eeh.c exports the following functions: -rtas_set_slot_reset() -- assert the PCI #RST line for 1/8th of a second -rtas_configure_bridge() -- ask firmware to configure any PCI bridges +rtas_set_slot_reset() + assert the PCI #RST line for 1/8th of a second +rtas_configure_bridge() + ask firmware to configure any PCI bridges located topologically under the pci slot. -eeh_save_bars() and eeh_restore_bars(): save and restore the PCI +eeh_save_bars() and eeh_restore_bars(): + save and restore the PCI config-space info for a device and any devices under it. @@ -191,7 +194,7 @@ events get delivered to user-space scripts. Following is an example sequence of events that cause a device driver close function to be called during the first phase of an EEH reset. -The following sequence is an example of the pcnet32 device driver. +The following sequence is an example of the pcnet32 device driver:: rpa_php_unconfig_pci_adapter (struct slot *) // in rpaphp_pci.c { @@ -241,53 +244,54 @@ The following sequence is an example of the pcnet32 device driver. }}}}}} - in drivers/pci/pci_driver.c, - struct device_driver->remove() is just pci_device_remove() - which calls struct pci_driver->remove() which is pcnet32_remove_one() - which calls unregister_netdev() (in net/core/dev.c) - which calls dev_close() (in net/core/dev.c) - which calls dev->stop() which is pcnet32_close() - which then does the appropriate shutdown. +in drivers/pci/pci_driver.c, +struct device_driver->remove() is just pci_device_remove() +which calls struct pci_driver->remove() which is pcnet32_remove_one() +which calls unregister_netdev() (in net/core/dev.c) +which calls dev_close() (in net/core/dev.c) +which calls dev->stop() which is pcnet32_close() +which then does the appropriate shutdown. --- + Following is the analogous stack trace for events sent to user-space -when the pci device is unconfigured. +when the pci device is unconfigured:: -rpa_php_unconfig_pci_adapter() { // in rpaphp_pci.c - calls - pci_remove_bus_device (struct pci_dev *) { // in /drivers/pci/remove.c + rpa_php_unconfig_pci_adapter() { // in rpaphp_pci.c calls - pci_destroy_dev (struct pci_dev *) { + pci_remove_bus_device (struct pci_dev *) { // in /drivers/pci/remove.c calls - device_unregister (&dev->dev) { // in /drivers/base/core.c + pci_destroy_dev (struct pci_dev *) { calls - device_del(struct device * dev) { // in /drivers/base/core.c + device_unregister (&dev->dev) { // in /drivers/base/core.c calls - kobject_del() { //in /libs/kobject.c + device_del(struct device * dev) { // in /drivers/base/core.c calls - kobject_uevent() { // in /libs/kobject.c + kobject_del() { //in /libs/kobject.c calls - kset_uevent() { // in /lib/kobject.c + kobject_uevent() { // in /libs/kobject.c calls - kset->uevent_ops->uevent() // which is really just - a call to - dev_uevent() { // in /drivers/base/core.c + kset_uevent() { // in /lib/kobject.c calls - dev->bus->uevent() which is really just a call to - pci_uevent () { // in drivers/pci/hotplug.c - which prints device name, etc.... 
+ kset->uevent_ops->uevent() // which is really just + a call to + dev_uevent() { // in /drivers/base/core.c + calls + dev->bus->uevent() which is really just a call to + pci_uevent () { // in drivers/pci/hotplug.c + which prints device name, etc.... + } } - } - then kobject_uevent() sends a netlink uevent to userspace - --> userspace uevent - (during early boot, nobody listens to netlink events and - kobject_uevent() executes uevent_helper[], which runs the - event process /sbin/hotplug) + then kobject_uevent() sends a netlink uevent to userspace + --> userspace uevent + (during early boot, nobody listens to netlink events and + kobject_uevent() executes uevent_helper[], which runs the + event process /sbin/hotplug) + } } - } - kobject_del() then calls sysfs_remove_dir(), which would - trigger any user-space daemon that was watching /sysfs, - and notice the delete event. + kobject_del() then calls sysfs_remove_dir(), which would + trigger any user-space daemon that was watching /sysfs, + and notice the delete event. Pro's and Con's of the Current Design @@ -299,12 +303,12 @@ individual device drivers, so that the current design throws a wide net. The biggest negative of the design is that it potentially disturbs network daemons and file systems that didn't need to be disturbed. --- A minor complaint is that resetting the network card causes +- A minor complaint is that resetting the network card causes user-space back-to-back ifdown/ifup burps that potentially disturb network daemons, that didn't need to even know that the pci card was being rebooted. --- A more serious concern is that the same reset, for SCSI devices, +- A more serious concern is that the same reset, for SCSI devices, causes havoc to mounted file systems. Scripts cannot post-facto unmount a file system without flushing pending buffers, but this is impossible, because I/O has already been stopped. Thus, @@ -322,7 +326,7 @@ network daemons and file systems that didn't need to be disturbed. from the block layer. It would be very natural to add an EEH reset into this chain of events. --- If a SCSI error occurs for the root device, all is lost unless +- If a SCSI error occurs for the root device, all is lost unless the sysadmin had the foresight to run /bin, /sbin, /etc, /var and so on, out of ramdisk/tmpfs. @@ -330,5 +334,3 @@ network daemons and file systems that didn't need to be disturbed. Conclusions ----------- There's forward progress ... - - diff --git a/Documentation/powerpc/firmware-assisted-dump.txt b/Documentation/powerpc/firmware-assisted-dump.rst index 10e7f4d16c14..9ca12830a48e 100644 --- a/Documentation/powerpc/firmware-assisted-dump.txt +++ b/Documentation/powerpc/firmware-assisted-dump.rst @@ -1,7 +1,8 @@ +====================== +Firmware-Assisted Dump +====================== - Firmware-Assisted Dump - ------------------------ - July 2011 +July 2011 The goal of firmware-assisted dump is to enable the dump of a crashed system, and to do so from a fully-reset system, and @@ -27,11 +28,11 @@ in production use. Comparing with kdump or other strategies, firmware-assisted dump offers several strong, practical advantages: --- Unlike kdump, the system has been reset, and loaded +- Unlike kdump, the system has been reset, and loaded with a fresh copy of the kernel. In particular, PCI and I/O devices have been reinitialized and are in a clean, consistent state. 
--- Once the dump is copied out, the memory that held the dump +- Once the dump is copied out, the memory that held the dump is immediately available to the running kernel. And therefore, unlike kdump, fadump doesn't need a 2nd reboot to get back the system to the production configuration. @@ -40,17 +41,18 @@ The above can only be accomplished by coordination with, and assistance from the Power firmware. The procedure is as follows: --- The first kernel registers the sections of memory with the +- The first kernel registers the sections of memory with the Power firmware for dump preservation during OS initialization. These registered sections of memory are reserved by the first kernel during early boot. --- When a system crashes, the Power firmware will save +- When a system crashes, the Power firmware will save the low memory (boot memory of size larger of 5% of system RAM or 256MB) of RAM to the previous registered region. It will also save system registers, and hardware PTE's. - NOTE: The term 'boot memory' means size of the low memory chunk + NOTE: + The term 'boot memory' means size of the low memory chunk that is required for a kernel to boot successfully when booted with restricted memory. By default, the boot memory size will be the larger of 5% of system RAM or 256MB. @@ -64,12 +66,12 @@ as follows: as fadump uses a predefined offset to reserve memory for boot memory dump preservation in case of a crash. --- After the low memory (boot memory) area has been saved, the +- After the low memory (boot memory) area has been saved, the firmware will reset PCI and other hardware state. It will *not* clear the RAM. It will then launch the bootloader, as normal. --- The freshly booted kernel will notice that there is a new +- The freshly booted kernel will notice that there is a new node (ibm,dump-kernel) in the device tree, indicating that there is crash data available from a previous boot. During the early boot OS will reserve rest of the memory above @@ -77,17 +79,18 @@ as follows: size. This will make sure that the second kernel will not touch any of the dump memory area. --- User-space tools will read /proc/vmcore to obtain the contents +- User-space tools will read /proc/vmcore to obtain the contents of memory, which holds the previous crashed kernel dump in ELF format. The userspace tools may copy this info to disk, or network, nas, san, iscsi, etc. as desired. --- Once the userspace tool is done saving dump, it will echo +- Once the userspace tool is done saving dump, it will echo '1' to /sys/kernel/fadump_release_mem to release the reserved memory back to general use, except the memory required for next firmware-assisted dump registration. - e.g. + e.g.:: + # echo 1 > /sys/kernel/fadump_release_mem Please note that the firmware-assisted dump feature @@ -95,7 +98,7 @@ is only available on Power6 and above systems with recent firmware versions. Implementation details: ----------------------- +----------------------- During boot, a check is made to see if firmware supports this feature on that particular machine. If it does, then @@ -121,7 +124,7 @@ Allocator (CMA) for memory reservation if CMA is configured for kernel. With CMA reservation this memory will be available for applications to use it, while kernel is prevented from using it. With this fadump will still be able to capture all of the kernel memory and most of the user -space memory except the user pages that were present in CMA region. 
+space memory except the user pages that were present in CMA region:: o Memory Reservation during first kernel @@ -166,7 +169,7 @@ The tools to examine the dump will be same as the ones used for kdump. How to enable firmware-assisted dump (fadump): -------------------------------------- +---------------------------------------------- 1. Set config option CONFIG_FA_DUMP=y and build kernel. 2. Boot into linux kernel with 'fadump=on' kernel cmdline option. @@ -177,19 +180,20 @@ How to enable firmware-assisted dump (fadump): to specify size of the memory to reserve for boot memory dump preservation. -NOTE: 1. 'fadump_reserve_mem=' parameter has been deprecated. Instead - use 'crashkernel=' to specify size of the memory to reserve - for boot memory dump preservation. - 2. If firmware-assisted dump fails to reserve memory then it - will fallback to existing kdump mechanism if 'crashkernel=' - option is set at kernel cmdline. - 3. if user wants to capture all of user space memory and ok with - reserved memory not available to production system, then - 'fadump=nocma' kernel parameter can be used to fallback to - old behaviour. +NOTE: + 1. 'fadump_reserve_mem=' parameter has been deprecated. Instead + use 'crashkernel=' to specify size of the memory to reserve + for boot memory dump preservation. + 2. If firmware-assisted dump fails to reserve memory then it + will fallback to existing kdump mechanism if 'crashkernel=' + option is set at kernel cmdline. + 3. if user wants to capture all of user space memory and ok with + reserved memory not available to production system, then + 'fadump=nocma' kernel parameter can be used to fallback to + old behaviour. Sysfs/debugfs files: ------------- +-------------------- Firmware-assisted dump feature uses sysfs file system to hold the control files and debugfs file to display memory reserved region. @@ -197,20 +201,20 @@ the control files and debugfs file to display memory reserved region. Here is the list of files under kernel sysfs: /sys/kernel/fadump_enabled - This is used to display the fadump status. - 0 = fadump is disabled - 1 = fadump is enabled + + - 0 = fadump is disabled + - 1 = fadump is enabled This interface can be used by kdump init scripts to identify if fadump is enabled in the kernel and act accordingly. /sys/kernel/fadump_registered - This is used to display the fadump registration status as well as to control (start/stop) the fadump registration. - 0 = fadump is not registered. - 1 = fadump is registered and ready to handle system crash. + + - 0 = fadump is not registered. + - 1 = fadump is registered and ready to handle system crash. To register fadump echo 1 > /sys/kernel/fadump_registered and echo 0 > /sys/kernel/fadump_registered for un-register and stop the @@ -219,13 +223,12 @@ Here is the list of files under kernel sysfs: easily integrated with kdump service start/stop. /sys/kernel/fadump_release_mem - This file is available only when fadump is active during second kernel. This is used to release the reserved memory region that are held for saving crash dump. To release the - reserved memory echo 1 to it: + reserved memory echo 1 to it:: - echo 1 > /sys/kernel/fadump_release_mem + echo 1 > /sys/kernel/fadump_release_mem After echo 1, the content of the /sys/kernel/debug/powerpc/fadump_region file will change to reflect the new memory reservations. @@ -238,38 +241,39 @@ Here is the list of files under powerpc debugfs: (Assuming debugfs is mounted on /sys/kernel/debug directory.) 
/sys/kernel/debug/powerpc/fadump_region - This file shows the reserved memory regions if fadump is enabled otherwise this file is empty. The output format - is: - <region>: [<start>-<end>] <reserved-size> bytes, Dumped: <dump-size> + is:: + + <region>: [<start>-<end>] <reserved-size> bytes, Dumped: <dump-size> e.g. - Contents when fadump is registered during first kernel + Contents when fadump is registered during first kernel:: - # cat /sys/kernel/debug/powerpc/fadump_region - CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x0 - HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x0 - DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x0 + # cat /sys/kernel/debug/powerpc/fadump_region + CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x0 + HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x0 + DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x0 - Contents when fadump is active during second kernel + Contents when fadump is active during second kernel:: - # cat /sys/kernel/debug/powerpc/fadump_region - CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x40020 - HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x1000 - DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x10000000 - : [0x00000010000000-0x0000006ffaffff] 0x5ffb0000 bytes, Dumped: 0x5ffb0000 + # cat /sys/kernel/debug/powerpc/fadump_region + CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x40020 + HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x1000 + DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x10000000 + : [0x00000010000000-0x0000006ffaffff] 0x5ffb0000 bytes, Dumped: 0x5ffb0000 -NOTE: Please refer to Documentation/filesystems/debugfs.txt on +NOTE: + Please refer to Documentation/filesystems/debugfs.txt on how to mount the debugfs filesystem. TODO: ----- - o Need to come up with the better approach to find out more + - Need to come up with the better approach to find out more accurate boot memory size that is required for a kernel to boot successfully when booted with restricted memory. - o The fadump implementation introduces a fadump crash info structure + - The fadump implementation introduces a fadump crash info structure in the scratch area before the ELF core header. The idea of introducing this structure is to pass some important crash info data to the second kernel which will help second kernel to populate ELF core header with @@ -277,7 +281,9 @@ TODO: design implementation does not address a possibility of introducing additional fields (in future) to this structure without affecting compatibility. Need to come up with the better approach to address this. + The possible approaches are: + 1. Introduce version field for version tracking, bump up the version whenever a new field is added to the structure in future. The version field can be used to find out what fields are valid for the current @@ -285,8 +291,11 @@ TODO: 2. Reserve the area of predefined size (say PAGE_SIZE) for this structure and have unused area as reserved (initialized to zero) for future field additions. + The advantage of approach 1 over 2 is we don't need to reserve extra space. ---- + Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> + This document is based on the original documentation written for phyp + assisted dump by Linas Vepstas and Manish Ahuja. 
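As an illustration of the sysfs control files described above, a minimal
user-space sketch (assuming only the documented /sys/kernel/fadump_enabled
and /sys/kernel/fadump_registered files; error handling reduced to the bare
minimum) could perform the registration step the same way an init script's
"echo 1 > /sys/kernel/fadump_registered" does::

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          char state = '0';
          int fd = open("/sys/kernel/fadump_enabled", O_RDONLY);

          /* '1' means the kernel was booted with fadump enabled. */
          if (fd < 0 || read(fd, &state, 1) != 1 || state != '1') {
                  fprintf(stderr, "fadump is not enabled in this kernel\n");
                  return 1;
          }
          close(fd);

          /* Register for dump preservation on the next crash. */
          fd = open("/sys/kernel/fadump_registered", O_WRONLY);
          if (fd < 0 || write(fd, "1", 1) != 1) {
                  perror("fadump_registered");
                  return 1;
          }
          close(fd);
          return 0;
  }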
diff --git a/Documentation/powerpc/hvcs.txt b/Documentation/powerpc/hvcs.rst index a730ca5a07f8..6808acde672f 100644 --- a/Documentation/powerpc/hvcs.txt +++ b/Documentation/powerpc/hvcs.rst @@ -1,19 +1,22 @@ -=========================================================================== - HVCS - IBM "Hypervisor Virtual Console Server" Installation Guide - for Linux Kernel 2.6.4+ - Copyright (C) 2004 IBM Corporation +=============================================================== +HVCS IBM "Hypervisor Virtual Console Server" Installation Guide +=============================================================== -=========================================================================== -NOTE:Eight space tabs are the optimum editor setting for reading this file. -=========================================================================== +for Linux Kernel 2.6.4+ - Author(s) : Ryan S. Arnold <rsa@us.ibm.com> - Date Created: March, 02, 2004 - Last Changed: August, 24, 2004 +Copyright (C) 2004 IBM Corporation ---------------------------------------------------------------------------- -Table of contents: +.. =========================================================================== +.. NOTE:Eight space tabs are the optimum editor setting for reading this file. +.. =========================================================================== + + +Author(s): Ryan S. Arnold <rsa@us.ibm.com> + +Date Created: March, 02, 2004 +Last Changed: August, 24, 2004 + +.. Table of contents: 1. Driver Introduction: 2. System Requirements @@ -27,8 +30,8 @@ Table of contents: 8. Questions & Answers: 9. Reporting Bugs: ---------------------------------------------------------------------------- 1. Driver Introduction: +======================= This is the device driver for the IBM Hypervisor Virtual Console Server, "hvcs". The IBM hvcs provides a tty driver interface to allow Linux user @@ -38,8 +41,8 @@ ppc64 system. Physical hardware consoles per partition are not practical on this hardware so system consoles are accessed by this driver using firmware interfaces to virtual terminal devices. ---------------------------------------------------------------------------- 2. System Requirements: +======================= This device driver was written using 2.6.4 Linux kernel APIs and will only build and run on kernels of this version or later. @@ -52,8 +55,8 @@ Sysfs must be mounted on the system so that the user can determine which major and minor numbers are associated with each vty-server. Directions for sysfs mounting are outside the scope of this document. ---------------------------------------------------------------------------- 3. Build Options: +================= The hvcs driver registers itself as a tty driver. The tty layer dynamically allocates a block of major and minor numbers in a quantity @@ -65,11 +68,11 @@ If the default number of device entries is adequate then this driver can be built into the kernel. If not, the default can be over-ridden by inserting the driver as a module with insmod parameters. ---------------------------------------------------------------------------- 3.1 Built-in: +------------- The following menuconfig example demonstrates selecting to build this -driver into the kernel. +driver into the kernel:: Device Drivers ---> Character devices ---> @@ -77,11 +80,11 @@ driver into the kernel. Begin the kernel make process. 
---------------------------------------------------------------------------- 3.2 Module: +----------- The following menuconfig example demonstrates selecting to build this -driver as a kernel module. +driver as a kernel module:: Device Drivers ---> Character devices ---> @@ -89,11 +92,11 @@ driver as a kernel module. The make process will build the following kernel modules: - hvcs.ko - hvcserver.ko + - hvcs.ko + - hvcserver.ko To insert the module with the default allocation execute the following -commands in the order they appear: +commands in the order they appear:: insmod hvcserver.ko insmod hvcs.ko @@ -103,7 +106,7 @@ be inserted first, otherwise the hvcs module will not find some of the symbols it expects. To override the default use an insmod parameter as follows (requesting 4 -tty devices as an example): +tty devices as an example):: insmod hvcs.ko hvcs_parm_num_devs=4 @@ -115,31 +118,31 @@ source file before building. NOTE: The length of time it takes to insmod the driver seems to be related to the number of tty interfaces the registering driver requests. -In order to remove the driver module execute the following command: +In order to remove the driver module execute the following command:: rmmod hvcs.ko The recommended method for installing hvcs as a module is to use depmod to build a current modules.dep file in /lib/modules/`uname -r` and then -execute: +execute:: -modprobe hvcs hvcs_parm_num_devs=4 + modprobe hvcs hvcs_parm_num_devs=4 The modules.dep file indicates that hvcserver.ko needs to be inserted before hvcs.ko and modprobe uses this file to smartly insert the modules in the proper order. The following modprobe command is used to remove hvcs and hvcserver in the -proper order: +proper order:: -modprobe -r hvcs + modprobe -r hvcs ---------------------------------------------------------------------------- 4. Installation: +================ The tty layer creates sysfs entries which contain the major and minor numbers allocated for the hvcs driver. The following snippet of "tree" -output of the sysfs directory shows where these numbers are presented: +output of the sysfs directory shows where these numbers are presented:: sys/ |-- *other sysfs base dirs* @@ -164,7 +167,7 @@ output of the sysfs directory shows where these numbers are presented: |-- *other sysfs base dirs* For the above examples the following output is a result of cat'ing the -"dev" entry in the hvcs directory: +"dev" entry in the hvcs directory:: Pow5:/sys/class/tty/hvcs0/ # cat dev 254:0 @@ -184,7 +187,7 @@ systems running hvcs will already have the device entries created or udev will do it automatically. Given the example output above, to manually create a /dev/hvcs* node entry -mknod can be used as follows: +mknod can be used as follows:: mknod /dev/hvcs0 c 254 0 mknod /dev/hvcs1 c 254 1 @@ -195,15 +198,15 @@ Using mknod to manually create the device entries makes these device nodes persistent. Once created they will exist prior to the driver insmod. Attempting to connect an application to /dev/hvcs* prior to insertion of -the hvcs module will result in an error message similar to the following: +the hvcs module will result in an error message similar to the following:: "/dev/hvcs*: No such device". NOTE: Just because there is a device node present doesn't mean that there is a vty-server device configured for that node. ---------------------------------------------------------------------------- 5. 
Connection +============= Since this driver controls devices that provide a tty interface a user can interact with the device node entries using any standard tty-interactive @@ -249,7 +252,7 @@ vty-server adapter is associated with which /dev/hvcs* node a special sysfs attribute has been added to each vty-server sysfs entry. This entry is called "index" and showing it reveals an integer that refers to the /dev/hvcs* entry to use to connect to that device. For instance cating the -index attribute of vty-server adapter 30000004 shows the following. +index attribute of vty-server adapter 30000004 shows the following:: Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat index 2 @@ -262,8 +265,8 @@ system the /dev/hvcs* entry that interacts with a particular vty-server adapter is not guaranteed to remain the same across system reboots. Look in the Q & A section for more on this issue. ---------------------------------------------------------------------------- 6. Disconnection +================ As a security feature to prevent the delivery of stale data to an unintended target the Power5 system firmware disables the fetching of data @@ -305,7 +308,7 @@ connection between the vty-server and target vty ONLY if the vterm_state previously read '1'. The write directive is ignored if the vterm_state read '0' or if any value other than '0' was written to the vterm_state attribute. The following example will show the method used for verifying -the vty-server connection status and disconnecting a vty-server connection. +the vty-server connection status and disconnecting a vty-server connection:: Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state 1 @@ -318,12 +321,12 @@ the vty-server connection status and disconnecting a vty-server connection. All vty-server connections are automatically terminated when the device is hotplug removed and when the module is removed. ---------------------------------------------------------------------------- 7. Configuration +================ Each vty-server has a sysfs entry in the /sys/devices/vio directory, which is symlinked in several other sysfs tree directories, notably under the -hvcs driver entry, which looks like the following example: +hvcs driver entry, which looks like the following example:: Pow5:/sys/bus/vio/drivers/hvcs # ls . .. 30000003 30000004 rescan @@ -344,7 +347,7 @@ completed or was never executed. Vty-server entries in this directory are a 32 bit partition unique unit address that is created by firmware. An example vty-server sysfs entry -looks like the following: +looks like the following:: Pow5:/sys/bus/vio/drivers/hvcs/30000004 # ls . current_vty devspec name partner_vtys @@ -352,21 +355,21 @@ looks like the following: Each entry is provided, by default with a "name" attribute. Reading the "name" attribute will reveal the device type as shown in the following -example: +example:: Pow5:/sys/bus/vio/drivers/hvcs/30000003 # cat name vty-server Each entry is also provided, by default, with a "devspec" attribute which reveals the full device specification when read, as shown in the following -example: +example:: Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat devspec /vdevice/vty-server@30000004 Each vty-server sysfs dir is provided with two read-only attributes that provide lists of easily parsed partner vty data: "partner_vtys" and -"partner_clcs". +"partner_clcs":: Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_vtys 30000000 @@ -396,7 +399,7 @@ A vty-server can only be connected to a single vty at a time. The entry, read. 
The current_vty can be changed by writing a valid partner clc to the entry -as in the following example: +as in the following example:: Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo U5112.428.10304 8A-V4-C0 > current_vty @@ -408,9 +411,9 @@ currently open connection is freed. Information on the "vterm_state" attribute was covered earlier on the chapter entitled "disconnection". ---------------------------------------------------------------------------- 8. Questions & Answers: -=========================================================================== +======================= + Q: What are the security concerns involving hvcs? A: There are three main security concerns: @@ -429,6 +432,7 @@ A: There are three main security concerns: partition) will experience the previously logged in session. --------------------------------------------------------------------------- + Q: How do I multiplex a console that I grab through hvcs so that other people can see it: @@ -440,6 +444,7 @@ term type "screen" to others. This means that curses based programs may not display properly in screen sessions. --------------------------------------------------------------------------- + Q: Why are the colors all messed up? Q: Why are the control characters acting strange or not working? Q: Why is the console output all strange and unintelligible? @@ -455,6 +460,7 @@ disconnect from the console. This will ensure that the next user gets their own TERM type set when they login. --------------------------------------------------------------------------- + Q: When I try to CONNECT kermit to an hvcs device I get: "Sorry, can't open connection: /dev/hvcs*"What is happening? @@ -490,6 +496,7 @@ A: There is not a corresponding vty-server device that maps to an existing /dev/hvcs* entry. --------------------------------------------------------------------------- + Q: When I try to CONNECT kermit to an hvcs device I get: "Sorry, write access to UUCP lockfile directory denied." @@ -497,6 +504,7 @@ A: The /dev/hvcs* entry you have specified doesn't exist where you said it does? Maybe you haven't inserted the module (on systems with udev). --------------------------------------------------------------------------- + Q: If I already have one Linux partition installed can I use hvcs on said partition to provide the console for the install of a second Linux partition? @@ -505,6 +513,7 @@ A: Yes granted that your are connected to the /dev/hvcs* device using kermit or cu or some other program that doesn't provide terminal emulation. --------------------------------------------------------------------------- + Q: Can I connect to more than one partition's console at a time using this driver? @@ -512,6 +521,7 @@ A: Yes. Of course this means that there must be more than one vty-server configured for this partition and each must point to a disconnected vty. --------------------------------------------------------------------------- + Q: Does the hvcs driver support dynamic (hotplug) addition of devices? A: Yes, if you have dlpar and hotplug enabled for your system and it has @@ -519,6 +529,7 @@ been built into the kernel the hvcs drivers is configured to dynamically handle additions of new devices and removals of unused devices. --------------------------------------------------------------------------- + Q: For some reason /dev/hvcs* doesn't map to the same vty-server adapter after a reboot. What happened? @@ -533,6 +544,7 @@ on how to determine which vty-server goes with which /dev/hvcs* node. 
Hint; look at the sysfs "index" attribute for the vty-server. --------------------------------------------------------------------------- + Q: Can I use /dev/hvcs* as a conduit to another partition and use a tty device on that partition as the other end of the pipe? @@ -554,7 +566,9 @@ read or write to /dev/hvcs*. Now you have a tty conduit between two partitions. --------------------------------------------------------------------------- + 9. Reporting Bugs: +================== The proper channel for reporting bugs is either through the Linux OS distribution company that provided your OS or by posting issues to the diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst new file mode 100644 index 000000000000..549b1cdd77ae --- /dev/null +++ b/Documentation/powerpc/index.rst @@ -0,0 +1,34 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======= +powerpc +======= + +.. toctree:: + :maxdepth: 1 + + bootwrapper + cpu_families + cpu_features + cxl + cxlflash + dawr-power9 + dscr + eeh-pci-error-recovery + firmware-assisted-dump + hvcs + isa-versions + mpc52xx + pci_iov_resource_on_powernv + pmu-ebb + ptrace + qe_firmware + syscall64-abi + transactional_memory + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/powerpc/isa-versions.rst b/Documentation/powerpc/isa-versions.rst index 66c24140ebf1..a363d8c1603c 100644 --- a/Documentation/powerpc/isa-versions.rst +++ b/Documentation/powerpc/isa-versions.rst @@ -1,13 +1,12 @@ -:orphan: - +========================== CPU to ISA Version Mapping ========================== Mapping of some CPU versions to relevant ISA versions. -========= ==================== +========= ==================================================================== CPU Architecture version -========= ==================== +========= ==================================================================== Power9 Power ISA v3.0B Power8 Power ISA v2.07 Power7 Power ISA v2.06 @@ -24,7 +23,7 @@ PPC970 - PowerPC User Instruction Set Architecture Book I v2.01 - PowerPC Virtual Environment Architecture Book II v2.01 - PowerPC Operating Environment Architecture Book III v2.01 - Plus Altivec/VMX ~= 2.03 -========= ==================== +========= ==================================================================== Key Features @@ -60,9 +59,9 @@ Power5 No PPC970 No ========== ==== -========== ==================== +========== ==================================== CPU Transactional Memory -========== ==================== +========== ==================================== Power9 Yes (* see transactional_memory.txt) Power8 Yes Power7 No @@ -73,4 +72,4 @@ Power5++ No Power5+ No Power5 No PPC970 No -========== ==================== +========== ==================================== diff --git a/Documentation/powerpc/mpc52xx.txt b/Documentation/powerpc/mpc52xx.rst index 0d540a31ea1a..8676ac63e077 100644 --- a/Documentation/powerpc/mpc52xx.txt +++ b/Documentation/powerpc/mpc52xx.rst @@ -1,11 +1,13 @@ +============================= Linux 2.6.x on MPC52xx family ------------------------------ +============================= For the latest info, go to http://www.246tNt.com/mpc52xx/ To compile/use : - - U-Boot: + - U-Boot:: + # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION if you wish to ). # make lite5200_defconfig @@ -16,7 +18,8 @@ To compile/use : => tftpboot 400000 pRamdisk => bootm 200000 400000 - - DBug: + - DBug:: + # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION if you wish to ). 
# make lite5200_defconfig @@ -28,7 +31,8 @@ To compile/use : DBug> dn -i zImage.initrd.lite5200 -Some remarks : +Some remarks: + - The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100 is not supported, and I'm not sure anyone is interesting in working on it so. I didn't took 5xxx because there's apparently a lot of 5xxx that have diff --git a/Documentation/powerpc/pci_iov_resource_on_powernv.txt b/Documentation/powerpc/pci_iov_resource_on_powernv.rst index b55c5cd83f8d..f5a5793e1613 100644 --- a/Documentation/powerpc/pci_iov_resource_on_powernv.txt +++ b/Documentation/powerpc/pci_iov_resource_on_powernv.rst @@ -1,6 +1,13 @@ +=================================================== +PCI Express I/O Virtualization Resource on Powerenv +=================================================== + Wei Yang <weiyang@linux.vnet.ibm.com> + Benjamin Herrenschmidt <benh@au1.ibm.com> + Bjorn Helgaas <bhelgaas@google.com> + 26 Aug 2014 This document describes the requirement from hardware for PCI MMIO resource @@ -10,6 +17,7 @@ Endpoints and the implementation on P8 (IODA2). The next two sections talks about considerations on enabling SRIOV on IODA2. 1. Introduction to Partitionable Endpoints +========================================== A Partitionable Endpoint (PE) is a way to group the various resources associated with a device or a set of devices to provide isolation between @@ -35,6 +43,7 @@ is a completely separate HW entity that replicates the entire logic, so has its own set of PEs, etc. 2. Implementation of Partitionable Endpoints on P8 (IODA2) +========================================================== P8 supports up to 256 Partitionable Endpoints per PHB. @@ -149,6 +158,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB. sense, but we haven't done it yet. 3. Considerations for SR-IOV on PowerKVM +======================================== * SR-IOV Background @@ -224,7 +234,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB. IODA supports 256 PEs, so segmented windows contain 256 segments, so if total_VFs is less than 256, we have the situation in Figure 1.0, where segments [total_VFs, 255] of the M64 window may map to some MMIO range on - other devices: + other devices:: 0 1 total_VFs - 1 +------+------+- -+------+------+ @@ -243,7 +253,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB. Figure 1.0 Direct map VF(n) BAR space Our current solution is to allocate 256 segments even if the VF(n) BAR - space doesn't need that much, as shown in Figure 1.1: + space doesn't need that much, as shown in Figure 1.1:: 0 1 total_VFs - 1 255 +------+------+- -+------+------+- -+------+------+ @@ -269,6 +279,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB. responds to segments [total_VFs, 255]. 4. Implications for the Generic PCI Code +======================================== The PCIe SR-IOV spec requires that the base of the VF(n) BAR space be aligned to the size of an individual VF BAR. 
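The segmented M64 window arithmetic described above can be made concrete
with a small, hypothetical helper; the names and the fixed 256-segment
split are illustrative only (they are not a kernel API), but they follow
the layout of Figure 1.1, where VF(n) owns segment n of the window::

  #include <stdint.h>

  #define M64_SEGMENTS 256

  /* VF(n)'s copy of a BAR starts n * (per-VF BAR size) past the base. */
  static uint64_t vf_bar_addr(uint64_t m64_base, uint64_t vf_bar_size,
                              unsigned int vf_index)
  {
          return m64_base + (uint64_t)vf_index * vf_bar_size;
  }

  /* The window is split into 256 equal segments, each mapping to one PE,
   * so the segment an address falls into is a simple division.
   */
  static unsigned int m64_segment(uint64_t m64_base, uint64_t m64_size,
                                  uint64_t addr)
  {
          return (unsigned int)((addr - m64_base) / (m64_size / M64_SEGMENTS));
  }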
diff --git a/Documentation/powerpc/pmu-ebb.txt b/Documentation/powerpc/pmu-ebb.rst index 73cd163dbfb8..4f474758eb55 100644 --- a/Documentation/powerpc/pmu-ebb.txt +++ b/Documentation/powerpc/pmu-ebb.rst @@ -1,3 +1,4 @@ +======================== PMU Event Based Branches ======================== diff --git a/Documentation/powerpc/ptrace.rst b/Documentation/powerpc/ptrace.rst new file mode 100644 index 000000000000..864d4b6dddd1 --- /dev/null +++ b/Documentation/powerpc/ptrace.rst @@ -0,0 +1,156 @@ +====== +Ptrace +====== + +GDB intends to support the following hardware debug features of BookE +processors: + +4 hardware breakpoints (IAC) +2 hardware watchpoints (read, write and read-write) (DAC) +2 value conditions for the hardware watchpoints (DVC) + +For that, we need to extend ptrace so that GDB can query and set these +resources. Since we're extending, we're trying to create an interface +that's extendable and that covers both BookE and server processors, so +that GDB doesn't need to special-case each of them. We added the +following 3 new ptrace requests. + +1. PTRACE_PPC_GETHWDEBUGINFO +============================ + +Query for GDB to discover the hardware debug features. The main info to +be returned here is the minimum alignment for the hardware watchpoints. +BookE processors don't have restrictions here, but server processors have +an 8-byte alignment restriction for hardware watchpoints. We'd like to avoid +adding special cases to GDB based on what it sees in AUXV. + +Since we're at it, we added other useful info that the kernel can return to +GDB: this query will return the number of hardware breakpoints, hardware +watchpoints and whether it supports a range of addresses and a condition. +The query will fill the following structure provided by the requesting process:: + + struct ppc_debug_info { + unit32_t version; + unit32_t num_instruction_bps; + unit32_t num_data_bps; + unit32_t num_condition_regs; + unit32_t data_bp_alignment; + unit32_t sizeof_condition; /* size of the DVC register */ + uint64_t features; /* bitmask of the individual flags */ + }; + +features will have bits indicating whether there is support for:: + + #define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x1 + #define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x2 + #define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x4 + #define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x8 + #define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x10 + +2. 
PTRACE_SETHWDEBUG + +Sets a hardware breakpoint or watchpoint, according to the provided structure:: + + struct ppc_hw_breakpoint { + uint32_t version; + #define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x1 + #define PPC_BREAKPOINT_TRIGGER_READ 0x2 + #define PPC_BREAKPOINT_TRIGGER_WRITE 0x4 + uint32_t trigger_type; /* only some combinations allowed */ + #define PPC_BREAKPOINT_MODE_EXACT 0x0 + #define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x1 + #define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x2 + #define PPC_BREAKPOINT_MODE_MASK 0x3 + uint32_t addr_mode; /* address match mode */ + + #define PPC_BREAKPOINT_CONDITION_MODE 0x3 + #define PPC_BREAKPOINT_CONDITION_NONE 0x0 + #define PPC_BREAKPOINT_CONDITION_AND 0x1 + #define PPC_BREAKPOINT_CONDITION_EXACT 0x1 /* different name for the same thing as above */ + #define PPC_BREAKPOINT_CONDITION_OR 0x2 + #define PPC_BREAKPOINT_CONDITION_AND_OR 0x3 + #define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000 /* byte enable bits */ + #define PPC_BREAKPOINT_CONDITION_BE(n) (1<<((n)+16)) + uint32_t condition_mode; /* break/watchpoint condition flags */ + + uint64_t addr; + uint64_t addr2; + uint64_t condition_value; + }; + +A request specifies one event, not necessarily just one register to be set. +For instance, if the request is for a watchpoint with a condition, both the +DAC and DVC registers will be set in the same request. + +With this GDB can ask for all kinds of hardware breakpoints and watchpoints +that the BookE supports. COMEFROM breakpoints available in server processors +are not contemplated, but that is out of the scope of this work. + +ptrace will return an integer (handle) uniquely identifying the breakpoint or +watchpoint just created. This integer will be used in the PTRACE_DELHWDEBUG +request to ask for its removal. Return -ENOSPC if the requested breakpoint +can't be allocated on the registers. 
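A hedged sketch of driving these requests from user space with ptrace(2) is
shown below.  The request numbers (PPC_PTRACE_SETHWDEBUG,
PPC_PTRACE_DELHWDEBUG) and PPC_DEBUG_CURRENT_VERSION are assumed to come
from the powerpc uapi <asm/ptrace.h>, and the tracee is assumed to be
attached and stopped already; it mirrors the second structure example that
follows (a watchpoint triggering on reads)::

  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <asm/ptrace.h>     /* struct ppc_hw_breakpoint, PPC_PTRACE_* */

  static int watch_reads_once(pid_t child, unsigned long address)
  {
          struct ppc_hw_breakpoint p = {
                  .version        = PPC_DEBUG_CURRENT_VERSION,
                  .trigger_type   = PPC_BREAKPOINT_TRIGGER_READ,
                  .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
                  .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
                  .addr           = address,
          };
          long handle = ptrace(PPC_PTRACE_SETHWDEBUG, child, 0, &p);

          if (handle < 0)
                  return -1;      /* e.g. no free watchpoint register */

          /* ... resume the child and wait for the watchpoint to fire ... */

          return ptrace(PPC_PTRACE_DELHWDEBUG, child, 0, handle);
  }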
+ +Some examples of using the structure to: + +- set a breakpoint in the first breakpoint register:: + + p.version = PPC_DEBUG_CURRENT_VERSION; + p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE; + p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; + p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; + p.addr = (uint64_t) address; + p.addr2 = 0; + p.condition_value = 0; + +- set a watchpoint which triggers on reads in the second watchpoint register:: + + p.version = PPC_DEBUG_CURRENT_VERSION; + p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ; + p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; + p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; + p.addr = (uint64_t) address; + p.addr2 = 0; + p.condition_value = 0; + +- set a watchpoint which triggers only with a specific value:: + + p.version = PPC_DEBUG_CURRENT_VERSION; + p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ; + p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; + p.condition_mode = PPC_BREAKPOINT_CONDITION_AND | PPC_BREAKPOINT_CONDITION_BE_ALL; + p.addr = (uint64_t) address; + p.addr2 = 0; + p.condition_value = (uint64_t) condition; + +- set a ranged hardware breakpoint:: + + p.version = PPC_DEBUG_CURRENT_VERSION; + p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE; + p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE; + p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; + p.addr = (uint64_t) begin_range; + p.addr2 = (uint64_t) end_range; + p.condition_value = 0; + +- set a watchpoint in server processors (BookS):: + + p.version = 1; + p.trigger_type = PPC_BREAKPOINT_TRIGGER_RW; + p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE; + or + p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; + + p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; + p.addr = (uint64_t) begin_range; + /* For PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE addr2 needs to be specified, where + * addr2 - addr <= 8 Bytes. + */ + p.addr2 = (uint64_t) end_range; + p.condition_value = 0; + +3. PTRACE_DELHWDEBUG + +Takes an integer which identifies an existing breakpoint or watchpoint +(i.e., the value returned from PTRACE_SETHWDEBUG), and deletes the +corresponding breakpoint or watchpoint.. diff --git a/Documentation/powerpc/ptrace.txt b/Documentation/powerpc/ptrace.txt deleted file mode 100644 index 99c5ce88d0fe..000000000000 --- a/Documentation/powerpc/ptrace.txt +++ /dev/null @@ -1,151 +0,0 @@ -GDB intends to support the following hardware debug features of BookE -processors: - -4 hardware breakpoints (IAC) -2 hardware watchpoints (read, write and read-write) (DAC) -2 value conditions for the hardware watchpoints (DVC) - -For that, we need to extend ptrace so that GDB can query and set these -resources. Since we're extending, we're trying to create an interface -that's extendable and that covers both BookE and server processors, so -that GDB doesn't need to special-case each of them. We added the -following 3 new ptrace requests. - -1. PTRACE_PPC_GETHWDEBUGINFO - -Query for GDB to discover the hardware debug features. The main info to -be returned here is the minimum alignment for the hardware watchpoints. -BookE processors don't have restrictions here, but server processors have -an 8-byte alignment restriction for hardware watchpoints. We'd like to avoid -adding special cases to GDB based on what it sees in AUXV. - -Since we're at it, we added other useful info that the kernel can return to -GDB: this query will return the number of hardware breakpoints, hardware -watchpoints and whether it supports a range of addresses and a condition. 
-The query will fill the following structure provided by the requesting process: - -struct ppc_debug_info { - unit32_t version; - unit32_t num_instruction_bps; - unit32_t num_data_bps; - unit32_t num_condition_regs; - unit32_t data_bp_alignment; - unit32_t sizeof_condition; /* size of the DVC register */ - uint64_t features; /* bitmask of the individual flags */ -}; - -features will have bits indicating whether there is support for: - -#define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x1 -#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x2 -#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x4 -#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x8 -#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x10 - -2. PTRACE_SETHWDEBUG - -Sets a hardware breakpoint or watchpoint, according to the provided structure: - -struct ppc_hw_breakpoint { - uint32_t version; -#define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x1 -#define PPC_BREAKPOINT_TRIGGER_READ 0x2 -#define PPC_BREAKPOINT_TRIGGER_WRITE 0x4 - uint32_t trigger_type; /* only some combinations allowed */ -#define PPC_BREAKPOINT_MODE_EXACT 0x0 -#define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x1 -#define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x2 -#define PPC_BREAKPOINT_MODE_MASK 0x3 - uint32_t addr_mode; /* address match mode */ - -#define PPC_BREAKPOINT_CONDITION_MODE 0x3 -#define PPC_BREAKPOINT_CONDITION_NONE 0x0 -#define PPC_BREAKPOINT_CONDITION_AND 0x1 -#define PPC_BREAKPOINT_CONDITION_EXACT 0x1 /* different name for the same thing as above */ -#define PPC_BREAKPOINT_CONDITION_OR 0x2 -#define PPC_BREAKPOINT_CONDITION_AND_OR 0x3 -#define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000 /* byte enable bits */ -#define PPC_BREAKPOINT_CONDITION_BE(n) (1<<((n)+16)) - uint32_t condition_mode; /* break/watchpoint condition flags */ - - uint64_t addr; - uint64_t addr2; - uint64_t condition_value; -}; - -A request specifies one event, not necessarily just one register to be set. -For instance, if the request is for a watchpoint with a condition, both the -DAC and DVC registers will be set in the same request. - -With this GDB can ask for all kinds of hardware breakpoints and watchpoints -that the BookE supports. COMEFROM breakpoints available in server processors -are not contemplated, but that is out of the scope of this work. - -ptrace will return an integer (handle) uniquely identifying the breakpoint or -watchpoint just created. This integer will be used in the PTRACE_DELHWDEBUG -request to ask for its removal. Return -ENOSPC if the requested breakpoint -can't be allocated on the registers. 
- -Some examples of using the structure to: - -- set a breakpoint in the first breakpoint register - - p.version = PPC_DEBUG_CURRENT_VERSION; - p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE; - p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; - p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; - p.addr = (uint64_t) address; - p.addr2 = 0; - p.condition_value = 0; - -- set a watchpoint which triggers on reads in the second watchpoint register - - p.version = PPC_DEBUG_CURRENT_VERSION; - p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ; - p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; - p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; - p.addr = (uint64_t) address; - p.addr2 = 0; - p.condition_value = 0; - -- set a watchpoint which triggers only with a specific value - - p.version = PPC_DEBUG_CURRENT_VERSION; - p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ; - p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; - p.condition_mode = PPC_BREAKPOINT_CONDITION_AND | PPC_BREAKPOINT_CONDITION_BE_ALL; - p.addr = (uint64_t) address; - p.addr2 = 0; - p.condition_value = (uint64_t) condition; - -- set a ranged hardware breakpoint - - p.version = PPC_DEBUG_CURRENT_VERSION; - p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE; - p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE; - p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; - p.addr = (uint64_t) begin_range; - p.addr2 = (uint64_t) end_range; - p.condition_value = 0; - -- set a watchpoint in server processors (BookS) - - p.version = 1; - p.trigger_type = PPC_BREAKPOINT_TRIGGER_RW; - p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE; - or - p.addr_mode = PPC_BREAKPOINT_MODE_EXACT; - - p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE; - p.addr = (uint64_t) begin_range; - /* For PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE addr2 needs to be specified, where - * addr2 - addr <= 8 Bytes. - */ - p.addr2 = (uint64_t) end_range; - p.condition_value = 0; - -3. PTRACE_DELHWDEBUG - -Takes an integer which identifies an existing breakpoint or watchpoint -(i.e., the value returned from PTRACE_SETHWDEBUG), and deletes the -corresponding breakpoint or watchpoint.. diff --git a/Documentation/powerpc/qe_firmware.txt b/Documentation/powerpc/qe_firmware.rst index e7ac24aec4ff..42f5103140c9 100644 --- a/Documentation/powerpc/qe_firmware.txt +++ b/Documentation/powerpc/qe_firmware.rst @@ -1,23 +1,23 @@ - Freescale QUICC Engine Firmware Uploading - ----------------------------------------- +========================================= +Freescale QUICC Engine Firmware Uploading +========================================= (c) 2007 Timur Tabi <timur at freescale.com>, Freescale Semiconductor -Table of Contents -================= +.. Table of Contents - I - Software License for Firmware + I - Software License for Firmware - II - Microcode Availability + II - Microcode Availability - III - Description and Terminology + III - Description and Terminology - IV - Microcode Programming Details + IV - Microcode Programming Details - V - Firmware Structure Layout + V - Firmware Structure Layout - VI - Sample Code for Creating Firmware Files + VI - Sample Code for Creating Firmware Files Revision Information ==================== @@ -39,7 +39,7 @@ http://opensource.freescale.com. For other firmware files, please contact your Freescale representative or your operating system vendor. III - Description and Terminology -================================ +================================= In this document, the term 'microcode' refers to the sequence of 32-bit integers that compose the actual QE microcode. 
@@ -89,7 +89,7 @@ being fixed in the RAM package utilizing they should be activated. This data structure signals the microcode which of these virtual traps is active. This structure contains 6 words that the application should copy to some -specific been defined. This table describes the structure. +specific been defined. This table describes the structure:: --------------------------------------------------------------- | Offset in | | Destination Offset | Size of | @@ -119,7 +119,7 @@ Extended Modes This is a double word bit array (64 bits) that defines special functionality which has an impact on the software drivers. Each bit has its own impact and has special instructions for the s/w associated with it. This structure is -described in this table: +described in this table:: ----------------------------------------------------------------------- | Bit # | Name | Description | @@ -220,7 +220,8 @@ The 'model' field is a 16-bit number that matches the actual SOC. The 'major' and 'minor' fields are the major and minor revision numbers, respectively, of the SOC. -For example, to match the 8323, revision 1.0: +For example, to match the 8323, revision 1.0:: + soc.model = 8323 soc.major = 1 soc.minor = 0 @@ -273,10 +274,10 @@ library and available to any driver that calles qe_get_firmware_info(). 'reserved'. After the last microcode is a 32-bit CRC. It can be calculated using -this algorithm: +this algorithm:: -u32 crc32(const u8 *p, unsigned int len) -{ + u32 crc32(const u8 *p, unsigned int len) + { unsigned int i; u32 crc = 0; @@ -286,7 +287,7 @@ u32 crc32(const u8 *p, unsigned int len) crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0); } return crc; -} + } VI - Sample Code for Creating Firmware Files ============================================ diff --git a/Documentation/powerpc/syscall64-abi.txt b/Documentation/powerpc/syscall64-abi.rst index fa716a0d88bd..e49f69f941b9 100644 --- a/Documentation/powerpc/syscall64-abi.txt +++ b/Documentation/powerpc/syscall64-abi.rst @@ -5,12 +5,12 @@ Power Architecture 64-bit Linux system call ABI syscall ======= -syscall calling sequence[*] matches the Power Architecture 64-bit ELF ABI +syscall calling sequence\ [1]_ matches the Power Architecture 64-bit ELF ABI specification C function calling sequence, including register preservation rules, with the following differences. -[*] Some syscalls (typically low-level management functions) may have - different calling sequences (e.g., rt_sigreturn). +.. [1] Some syscalls (typically low-level management functions) may have + different calling sequences (e.g., rt_sigreturn). Parameters and return value --------------------------- @@ -33,12 +33,14 @@ Register preservation rules Register preservation rules match the ELF ABI calling sequence with the following differences: -r0: Volatile. (System call number.) -r3: Volatile. (Parameter 1, and return value.) -r4-r8: Volatile. (Parameters 2-6.) -cr0: Volatile (cr0.SO is the return error condition) -cr1, cr5-7: Nonvolatile. -lr: Nonvolatile. +=========== ============= ======================================== +r0 Volatile (System call number.) +r3 Volatile (Parameter 1, and return value.) +r4-r8 Volatile (Parameters 2-6.) +cr0 Volatile (cr0.SO is the return error condition) +cr1, cr5-7 Nonvolatile +lr Nonvolatile +=========== ============= ======================================== All floating point and vector data registers as well as control and status registers are nonvolatile. @@ -90,9 +92,12 @@ The vsyscall may or may not use the caller's stack frame save areas. 
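The sc-based calling sequence and the cr0.SO error convention described
earlier in this document can be illustrated with a minimal, hedged sketch
(not a replacement for the libc wrappers; the clobber list is kept
deliberately conservative)::

  /* One-argument raw system call: number in r0, first argument and return
   * value in r3; on failure cr0.SO is set and r3 holds the positive errno,
   * negated here to yield the usual -errno convention.
   */
  static long raw_syscall1(long nr, long arg1)
  {
          register long r0 asm("r0") = nr;
          register long r3 asm("r3") = arg1;

          asm volatile("sc\n\t"
                       "bns+ 1f\n\t"       /* success: cr0.SO clear  */
                       "neg  %0, %0\n"     /* failure: return -errno */
                       "1:"
                       : "+r" (r3), "+r" (r0)
                       :
                       : "memory", "cr0", "ctr", "xer",
                         "r4", "r5", "r6", "r7", "r8",
                         "r9", "r10", "r11", "r12");
          return r3;
  }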
Register preservation rules --------------------------- -r0: Volatile. -cr1, cr5-7: Volatile. -lr: Volatile. + +=========== ======== +r0 Volatile +cr1, cr5-7 Volatile +lr Volatile +=========== ======== Invocation ---------- diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.rst index 52c023e14f26..09955103acb4 100644 --- a/Documentation/powerpc/transactional_memory.txt +++ b/Documentation/powerpc/transactional_memory.rst @@ -1,3 +1,4 @@ +============================ Transactional Memory support ============================ @@ -17,29 +18,29 @@ instructions are presented to delimit transactions; transactions are guaranteed to either complete atomically or roll back and undo any partial changes. -A simple transaction looks like this: +A simple transaction looks like this:: -begin_move_money: - tbegin - beq abort_handler + begin_move_money: + tbegin + beq abort_handler - ld r4, SAVINGS_ACCT(r3) - ld r5, CURRENT_ACCT(r3) - subi r5, r5, 1 - addi r4, r4, 1 - std r4, SAVINGS_ACCT(r3) - std r5, CURRENT_ACCT(r3) + ld r4, SAVINGS_ACCT(r3) + ld r5, CURRENT_ACCT(r3) + subi r5, r5, 1 + addi r4, r4, 1 + std r4, SAVINGS_ACCT(r3) + std r5, CURRENT_ACCT(r3) - tend + tend - b continue + b continue -abort_handler: - ... test for odd failures ... + abort_handler: + ... test for odd failures ... - /* Retry the transaction if it failed because it conflicted with - * someone else: */ - b begin_move_money + /* Retry the transaction if it failed because it conflicted with + * someone else: */ + b begin_move_money The 'tbegin' instruction denotes the start point, and 'tend' the end point. @@ -123,7 +124,7 @@ Transaction-aware signal handlers can read the transactional register state from the second ucontext. This will be necessary for crash handlers to determine, for example, the address of the instruction causing the SIGSEGV. -Example signal handler: +Example signal handler:: void crash_handler(int sig, siginfo_t *si, void *uc) { @@ -133,9 +134,9 @@ Example signal handler: if (ucp_link) { u64 msr = ucp->uc_mcontext.regs->msr; /* May have transactional ucontext! */ -#ifndef __powerpc64__ + #ifndef __powerpc64__ msr |= ((u64)transactional_ucp->uc_mcontext.regs->msr) << 32; -#endif + #endif if (MSR_TM_ACTIVE(msr)) { /* Yes, we crashed during a transaction. Oops. */ fprintf(stderr, "Transaction to be restarted at 0x%llx, but " @@ -176,6 +177,7 @@ Failure cause codes used by kernel These are defined in <asm/reg.h>, and distinguish different reasons why the kernel aborted a transaction: + ====================== ================================ TM_CAUSE_RESCHED Thread was rescheduled. TM_CAUSE_TLBI Software TLB invalid. TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. @@ -184,6 +186,7 @@ kernel aborted a transaction: TM_CAUSE_MISC Currently unused. TM_CAUSE_ALIGNMENT Alignment fault. TM_CAUSE_EMULATE Emulation that touched memory. + ====================== ================================ These can be checked by the user program's abort handler as TEXASR[0:7]. If bit 7 is set, it indicates that the error is consider persistent. For example @@ -203,7 +206,7 @@ POWER9 ====== TM on POWER9 has issues with storing the complete register state. 
This -is described in this commit: +is described in this commit:: commit 4bb3c7a0208fc13ca70598efd109901a7cd45ae7 Author: Paul Mackerras <paulus@ozlabs.org> diff --git a/Documentation/process/conf.py b/Documentation/process/conf.py deleted file mode 100644 index 1b01a80ad9ce..000000000000 --- a/Documentation/process/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = 'Linux Kernel Development Documentation' - -tags.add("subproject") - -latex_documents = [ - ('index', 'process.tex', 'Linux Kernel Development Documentation', - 'The kernel development community', 'manual'), -] diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst index 49e0f64a3427..053b24a6dd38 100644 --- a/Documentation/process/deprecated.rst +++ b/Documentation/process/deprecated.rst @@ -119,3 +119,17 @@ array may exceed the remaining memory in the stack segment. This could lead to a crash, possible overwriting sensitive contents at the end of the stack (when built without `CONFIG_THREAD_INFO_IN_TASK=y`), or overwriting memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`) + +Implicit switch case fall-through +--------------------------------- +The C language allows switch cases to "fall through" when +a "break" statement is missing at the end of a case. This, +however, introduces ambiguity in the code, as it's not always +clear if the missing break is intentional or a bug. As there +have been a long list of flaws `due to missing "break" statements +<https://cwe.mitre.org/data/definitions/484.html>`_, we no longer allow +"implicit fall-through". In order to identify an intentional fall-through +case, we have adopted the marking used by static analyzers: a comment +saying `/* Fall through */`. Once the C++17 `__attribute__((fallthrough))` +is more widely handled by C compilers, static analyzers, and IDEs, we can +switch to using that instead. diff --git a/Documentation/s390/vfio-ccw.rst b/Documentation/s390/vfio-ccw.rst index 1e210c6afa88..fca9c4f5bd9c 100644 --- a/Documentation/s390/vfio-ccw.rst +++ b/Documentation/s390/vfio-ccw.rst @@ -180,6 +180,13 @@ The process of how these work together. add it to an iommu_group and a vfio_group. Then we could pass through the mdev to a guest. + +VFIO-CCW Regions +---------------- + +The vfio-ccw driver exposes MMIO regions to accept requests from and return +results to userspace. + vfio-ccw I/O region ------------------- @@ -205,6 +212,25 @@ irb_area stores the I/O result. ret_code stores a return code for each access of the region. +This region is always available. + +vfio-ccw cmd region +------------------- + +The vfio-ccw cmd region is used to accept asynchronous instructions +from userspace:: + + #define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0) + #define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1) + struct ccw_cmd_region { + __u32 command; + __u32 ret_code; + } __packed; + +This region is exposed via region type VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD. + +Currently, CLEAR SUBCHANNEL and HALT SUBCHANNEL use this region. + vfio-ccw operation details -------------------------- @@ -306,9 +332,8 @@ Together with the corresponding work in QEMU, we can bring the passed through DASD/ECKD device online in a guest now and use it as a block device. -While the current code allows the guest to start channel programs via -START SUBCHANNEL, support for HALT SUBCHANNEL or CLEAR SUBCHANNEL is -not yet implemented. 
+The current code allows the guest to start channel programs via +START SUBCHANNEL, and to issue HALT SUBCHANNEL and CLEAR SUBCHANNEL. vfio-ccw supports classic (command mode) channel I/O only. Transport mode (HPF) is not supported. diff --git a/Documentation/sh/conf.py b/Documentation/sh/conf.py deleted file mode 100644 index 1eb684a13ac8..000000000000 --- a/Documentation/sh/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "SuperH architecture implementation manual" - -tags.add("subproject") - -latex_documents = [ - ('index', 'sh.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/sound/conf.py b/Documentation/sound/conf.py deleted file mode 100644 index 3f1fc5e74e7b..000000000000 --- a/Documentation/sound/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Linux Sound Subsystem Documentation" - -tags.add("subproject") - -latex_documents = [ - ('index', 'sound.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py index 301a21aa4f63..eeb394b39e2c 100644 --- a/Documentation/sphinx/load_config.py +++ b/Documentation/sphinx/load_config.py @@ -21,6 +21,29 @@ def loadConfig(namespace): and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ): config_file = os.path.abspath(config_file) + # Let's avoid one conf.py file just due to latex_documents + start = config_file.find('Documentation/') + if start >= 0: + start = config_file.find('/', start + 1) + + end = config_file.rfind('/') + if start >= 0 and end > 0: + dir = config_file[start + 1:end] + + print("source directory: %s" % dir) + new_latex_docs = [] + latex_documents = namespace['latex_documents'] + + for l in latex_documents: + if l[0].find(dir + '/') == 0: + has = True + fn = l[0][len(dir) + 1:] + new_latex_docs.append((fn, l[1], l[2], l[3], l[4])) + break + + namespace['latex_documents'] = new_latex_docs + + # If there is an extra conf.py file, load it if os.path.isfile(config_file): sys.stdout.write("load additional sphinx-config: %s\n" % config_file) config = namespace.copy() @@ -29,4 +52,6 @@ def loadConfig(namespace): del config['__file__'] namespace.update(config) else: - sys.stderr.write("WARNING: additional sphinx-config not found: %s\n" % config_file) + config = namespace.copy() + config['tags'].add("subproject") + namespace.update(config) diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst index 1739cba8863e..f1ad4504b734 100644 --- a/Documentation/translations/it_IT/doc-guide/sphinx.rst +++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst @@ -242,8 +242,9 @@ del kernel: * Per inserire blocchi di testo con caratteri a dimensione fissa (codici di esempio, casi d'uso, eccetera): utilizzate ``::`` quando non è necessario evidenziare la sintassi, specialmente per piccoli frammenti; invece, - utilizzate ``.. code-block:: <language>`` per blocchi di più lunghi che - potranno beneficiare dell'avere la sintassi evidenziata. + utilizzate ``.. code-block:: <language>`` per blocchi più lunghi che + beneficeranno della sintassi evidenziata. Per un breve pezzo di codice da + inserire nel testo, usate \`\`. Il dominio C @@ -267,12 +268,14 @@ molto comune come ``open`` o ``ioctl``: Il nome della funzione (per esempio ioctl) rimane nel testo ma il nome del suo riferimento cambia da ``ioctl`` a ``VIDIOC_LOG_STATUS``. 
Anche la voce -nell'indice cambia in ``VIDIOC_LOG_STATUS`` e si potrà quindi fare riferimento -a questa funzione scrivendo: - -.. code-block:: rst - - :c:func:`VIDIOC_LOG_STATUS` +nell'indice cambia in ``VIDIOC_LOG_STATUS``. + +Notate che per una funzione non c'è bisogno di usare ``c:func:`` per generarne +i riferimenti nella documentazione. Grazie a qualche magica estensione a +Sphinx, il sistema di generazione della documentazione trasformerà +automaticamente un riferimento ad una ``funzione()`` in un riferimento +incrociato quando questa ha una voce nell'indice. Se trovate degli usi di +``c:func:`` nella documentazione del kernel, sentitevi liberi di rimuoverli. Tabelle a liste diff --git a/Documentation/translations/it_IT/process/index.rst b/Documentation/translations/it_IT/process/index.rst index 2eda85d5cd1e..012de0f3154a 100644 --- a/Documentation/translations/it_IT/process/index.rst +++ b/Documentation/translations/it_IT/process/index.rst @@ -27,6 +27,7 @@ Di seguito le guide che ogni sviluppatore dovrebbe leggere. code-of-conduct development-process submitting-patches + programming-language coding-style maintainer-pgp-guide email-clients diff --git a/Documentation/translations/it_IT/process/kernel-docs.rst b/Documentation/translations/it_IT/process/kernel-docs.rst index 7bd70d661737..38e0a955121a 100644 --- a/Documentation/translations/it_IT/process/kernel-docs.rst +++ b/Documentation/translations/it_IT/process/kernel-docs.rst @@ -1,6 +1,7 @@ .. include:: ../disclaimer-ita.rst :Original: :ref:`Documentation/process/kernel-docs.rst <kernel_docs>` +:Translator: Federico Vaga <federico.vaga@vaga.pv.it> .. _it_kernel_docs: @@ -8,6 +9,10 @@ Indice di documenti per le persone interessate a capire e/o scrivere per il kernel Linux ======================================================================================== -.. warning:: - - TODO ancora da tradurre +.. note:: + Questo documento contiene riferimenti a documenti in lingua inglese; inoltre + utilizza dai campi *ReStructuredText* di supporto alla ricerca e che per + questo motivo è meglio non tradurre al fine di garantirne un corretto + utilizzo. + Per questi motivi il documento non verrà tradotto. Per favore fate + riferimento al documento originale in lingua inglese. diff --git a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst index 276db0e37f43..118fb4153e8f 100644 --- a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst +++ b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst @@ -248,7 +248,10 @@ possano ricevere la vostra nuova sottochiave:: kernel. Se per qualche ragione preferite rimanere con sottochiavi RSA, nel comando - precedente, sostituite "ed25519" con "rsa2048". + precedente, sostituite "ed25519" con "rsa2048". In aggiunta, se avete + intenzione di usare un dispositivo hardware che non supporta le chiavi + ED25519 ECC, come la Nitrokey Pro o la Yubikey, allora dovreste usare + "nistp256" al posto di "ed25519". Copia di riserva della chiave primaria per gestire il recupero da disastro -------------------------------------------------------------------------- @@ -449,23 +452,27 @@ implementi le funzionalità delle smartcard. Sul mercato ci sono diverse soluzioni disponibili: - `Nitrokey Start`_: è Open hardware e Free Software, è basata sul progetto - `GnuK`_ della FSIJ. 
Ha il supporto per chiavi ECC, ma meno funzionalità di - sicurezza (come la resistenza alla manomissione o alcuni attacchi ad un - canale laterale). + `GnuK`_ della FSIJ. Questo è uno dei pochi dispositivi a supportare le chiavi + ECC ED25519, ma offre meno funzionalità di sicurezza (come la resistenza + alla manomissione o alcuni attacchi ad un canale laterale). - `Nitrokey Pro`_: è simile alla Nitrokey Start, ma è più resistente alla - manomissione e offre più funzionalità di sicurezza, ma l'ECC. -- `Yubikey 4`_: l'hardware e il software sono proprietari, ma è più economica + manomissione e offre più funzionalità di sicurezza. La Pro 2 supporta la + crittografia ECC (NISTP). +- `Yubikey 5`_: l'hardware e il software sono proprietari, ma è più economica della Nitrokey Pro ed è venduta anche con porta USB-C il che è utile con i computer portatili più recenti. In aggiunta, offre altre funzionalità di - sicurezza come FIDO, U2F, ma non l'ECC + sicurezza come FIDO, U2F, e ora supporta anche le chiavi ECC (NISTP) `Su LWN c'è una buona recensione`_ dei modelli elencati qui sopra e altri. +La scelta dipenderà dal costo, dalla disponibilità nella vostra area +geografica e vostre considerazioni sull'hardware aperto/proprietario. + Se volete usare chiavi ECC, la vostra migliore scelta sul mercato è la Nitrokey Start. .. _`Nitrokey Start`: https://shop.nitrokey.com/shop/product/nitrokey-start-6 -.. _`Nitrokey Pro`: https://shop.nitrokey.com/shop/product/nitrokey-pro-3 -.. _`Yubikey 4`: https://www.yubico.com/product/yubikey-4-series/ +.. _`Nitrokey Pro 2`: https://shop.nitrokey.com/shop/product/nitrokey-pro-2-3 +.. _`Yubikey 5`: https://www.yubico.com/product/yubikey-5-overview/ .. _Gnuk: http://www.fsij.org/doc-gnuk/ .. _`Su LWN c'è una buona recensione`: https://lwn.net/Articles/736231/ diff --git a/Documentation/translations/it_IT/process/programming-language.rst b/Documentation/translations/it_IT/process/programming-language.rst new file mode 100644 index 000000000000..f4b006395849 --- /dev/null +++ b/Documentation/translations/it_IT/process/programming-language.rst @@ -0,0 +1,51 @@ +.. include:: ../disclaimer-ita.rst + +:Original: :ref:`Documentation/process/programming-language.rst <programming_language>` +:Translator: Federico Vaga <federico.vaga@vaga.pv.it> + +.. _it_programming_language: + +Linguaggio di programmazione +============================ + +Il kernel è scritto nel linguaggio di programmazione C [c-language]_. +Più precisamente, il kernel viene compilato con ``gcc`` [gcc]_ usando +l'opzione ``-std=gnu89`` [gcc-c-dialect-options]_: il dialetto GNU +dello standard ISO C90 (con l'aggiunta di alcune funzionalità da C99) + +Questo dialetto contiene diverse estensioni al linguaggio [gnu-extensions]_, +e molte di queste vengono usate sistematicamente dal kernel. + +Il kernel offre un certo livello di supporto per la compilazione con ``clang`` +[clang]_ e ``icc`` [icc]_ su diverse architetture, tuttavia in questo momento +il supporto non è completo e richiede delle patch aggiuntive. + +Attributi +--------- + +Una delle estensioni più comuni e usate nel kernel sono gli attributi +[gcc-attribute-syntax]_. Gli attributi permettono di aggiungere una semantica, +definita dell'implementazione, alle entità del linguaggio (come le variabili, +le funzioni o i tipi) senza dover fare importanti modifiche sintattiche al +linguaggio stesso (come l'aggiunta di nuove parole chiave) [n2049]_. 
+ +In alcuni casi, gli attributi sono opzionali (ovvero un compilatore che non +dovesse supportarli dovrebbe produrre comunque codice corretto, anche se +più lento o che non esegue controlli aggiuntivi durante la compilazione). + +Il kernel definisce alcune pseudo parole chiave (per esempio ``__pure``) +in alternativa alla sintassi GNU per gli attributi (per esempio +``__attribute__((__pure__))``) allo scopo di mostrare quali funzionalità si +possono usare e/o per accorciare il codice. + +Per maggiori informazioni consultate il file d'intestazione +``include/linux/compiler_attributes.h``. + +.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards +.. [gcc] https://gcc.gnu.org +.. [clang] https://clang.llvm.org +.. [icc] https://software.intel.com/en-us/c-compilers +.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html +.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html +.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html +.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index a33c2a536542..2774624ee843 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -569,7 +569,7 @@ ACQUIRE 는 해당 오퍼레이션의 로드 부분에만 적용되고 RELEASE 는 [*] 버스 마스터링 DMA 와 일관성에 대해서는 다음을 참고하시기 바랍니다: - Documentation/PCI/pci.rst + Documentation/driver-api/pci/pci.rst Documentation/DMA-API-HOWTO.txt Documentation/DMA-API.txt diff --git a/Documentation/userspace-api/conf.py b/Documentation/userspace-api/conf.py deleted file mode 100644 index 2eaf59f844e5..000000000000 --- a/Documentation/userspace-api/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "The Linux kernel user-space API guide" - -tags.add("subproject") - -latex_documents = [ - ('index', 'userspace-api.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/virtual/index.rst b/Documentation/virt/index.rst index 062ffb527043..062ffb527043 100644 --- a/Documentation/virtual/index.rst +++ b/Documentation/virt/index.rst diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index d18c97b4e140..d18c97b4e140 100644 --- a/Documentation/virtual/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virt/kvm/api.txt index e54a3f51ddc5..2d067767b617 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -3781,7 +3781,7 @@ encrypted VMs. Currently, this ioctl is used for issuing Secure Encrypted Virtualization (SEV) commands on AMD Processors. The SEV commands are defined in -Documentation/virtual/kvm/amd-memory-encryption.rst. +Documentation/virt/kvm/amd-memory-encryption.rst.
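Relating to the attribute discussion in the it_IT programming-language.rst hunk above: the kernel-style shorthand (for example ``__pure``) expands to the full GNU attribute syntax via ``include/linux/compiler_attributes.h``. The sketch below is a hedged, stand-alone illustration rather than kernel code, with a local macro standing in for the kernel's definition::

    /* Local stand-in for the definition in include/linux/compiler_attributes.h. */
    #define __pure __attribute__((__pure__))

    /*
     * A pure function: its result depends only on its arguments and on memory
     * it does not modify, so the compiler may fold repeated calls.
     */
    static __pure int popcount_u32(unsigned int v)
    {
        int n = 0;

        while (v) {
            n += v & 1;
            v >>= 1;
        }
        return n;
    }

    int main(void)
    {
        return popcount_u32(0xaa) == 4 ? 0 : 1;
    }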
4.111 KVM_MEMORY_ENCRYPT_REG_REGION diff --git a/Documentation/virtual/kvm/arm/hyp-abi.txt b/Documentation/virt/kvm/arm/hyp-abi.txt index a20a0bee268d..a20a0bee268d 100644 --- a/Documentation/virtual/kvm/arm/hyp-abi.txt +++ b/Documentation/virt/kvm/arm/hyp-abi.txt diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virt/kvm/arm/psci.txt index 559586fc9d37..559586fc9d37 100644 --- a/Documentation/virtual/kvm/arm/psci.txt +++ b/Documentation/virt/kvm/arm/psci.txt diff --git a/Documentation/virtual/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst index 01b081f6e7ea..01b081f6e7ea 100644 --- a/Documentation/virtual/kvm/cpuid.rst +++ b/Documentation/virt/kvm/cpuid.rst diff --git a/Documentation/virtual/kvm/devices/README b/Documentation/virt/kvm/devices/README index 34a69834124a..34a69834124a 100644 --- a/Documentation/virtual/kvm/devices/README +++ b/Documentation/virt/kvm/devices/README diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virt/kvm/devices/arm-vgic-its.txt index eeaa95b893a8..eeaa95b893a8 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt +++ b/Documentation/virt/kvm/devices/arm-vgic-its.txt diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virt/kvm/devices/arm-vgic-v3.txt index ff290b43c8e5..ff290b43c8e5 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt +++ b/Documentation/virt/kvm/devices/arm-vgic-v3.txt diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virt/kvm/devices/arm-vgic.txt index 97b6518148f8..97b6518148f8 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic.txt +++ b/Documentation/virt/kvm/devices/arm-vgic.txt diff --git a/Documentation/virtual/kvm/devices/mpic.txt b/Documentation/virt/kvm/devices/mpic.txt index 8257397adc3c..8257397adc3c 100644 --- a/Documentation/virtual/kvm/devices/mpic.txt +++ b/Documentation/virt/kvm/devices/mpic.txt diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virt/kvm/devices/s390_flic.txt index a4e20a090174..a4e20a090174 100644 --- a/Documentation/virtual/kvm/devices/s390_flic.txt +++ b/Documentation/virt/kvm/devices/s390_flic.txt diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virt/kvm/devices/vcpu.txt index 2b5dab16c4f2..2b5dab16c4f2 100644 --- a/Documentation/virtual/kvm/devices/vcpu.txt +++ b/Documentation/virt/kvm/devices/vcpu.txt diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virt/kvm/devices/vfio.txt index 528c77c8022c..528c77c8022c 100644 --- a/Documentation/virtual/kvm/devices/vfio.txt +++ b/Documentation/virt/kvm/devices/vfio.txt diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virt/kvm/devices/vm.txt index 4ffb82b02468..4ffb82b02468 100644 --- a/Documentation/virtual/kvm/devices/vm.txt +++ b/Documentation/virt/kvm/devices/vm.txt diff --git a/Documentation/virtual/kvm/devices/xics.txt b/Documentation/virt/kvm/devices/xics.txt index 42864935ac5d..42864935ac5d 100644 --- a/Documentation/virtual/kvm/devices/xics.txt +++ b/Documentation/virt/kvm/devices/xics.txt diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virt/kvm/devices/xive.txt index 9a24a4525253..9a24a4525253 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virt/kvm/devices/xive.txt diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virt/kvm/halt-polling.txt index 4f791b128dd2..4f791b128dd2 100644 --- a/Documentation/virtual/kvm/halt-polling.txt +++ 
b/Documentation/virt/kvm/halt-polling.txt diff --git a/Documentation/virtual/kvm/hypercalls.txt b/Documentation/virt/kvm/hypercalls.txt index da210651f714..5f6d291bd004 100644 --- a/Documentation/virtual/kvm/hypercalls.txt +++ b/Documentation/virt/kvm/hypercalls.txt @@ -18,7 +18,7 @@ S390: number in R1. For further information on the S390 diagnose call as supported by KVM, - refer to Documentation/virtual/kvm/s390-diag.txt. + refer to Documentation/virt/kvm/s390-diag.txt. PowerPC: It uses R3-R10 and hypercall number in R11. R4-R11 are used as output registers. @@ -26,7 +26,7 @@ S390: KVM hypercalls uses 4 byte opcode, that are patched with 'hypercall-instructions' property inside the device tree's /hypervisor node. - For more information refer to Documentation/virtual/kvm/ppc-pv.txt + For more information refer to Documentation/virt/kvm/ppc-pv.txt MIPS: KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall diff --git a/Documentation/virtual/kvm/index.rst b/Documentation/virt/kvm/index.rst index 0b206a06f5be..ada224a511fe 100644 --- a/Documentation/virtual/kvm/index.rst +++ b/Documentation/virt/kvm/index.rst @@ -9,3 +9,4 @@ KVM amd-memory-encryption cpuid + vcpu-requests diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virt/kvm/locking.txt index 635cd6eaf714..635cd6eaf714 100644 --- a/Documentation/virtual/kvm/locking.txt +++ b/Documentation/virt/kvm/locking.txt diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt index 2efe0efc516e..1b9880dfba0a 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virt/kvm/mmu.txt @@ -298,7 +298,7 @@ Handling a page fault is performed as follows: vcpu->arch.mmio_gfn, and call the emulator - If both P bit and R/W bit of error code are set, this could possibly be handled as a "fast page fault" (fixed without taking the MMU lock). See - the description in Documentation/virtual/kvm/locking.txt. + the description in Documentation/virt/kvm/locking.txt. - if needed, walk the guest page tables to determine the guest translation (gva->gpa or ngpa->gpa) - if permissions are insufficient, reflect the fault back to the guest diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virt/kvm/msr.txt index df1f4338b3ca..df1f4338b3ca 100644 --- a/Documentation/virtual/kvm/msr.txt +++ b/Documentation/virt/kvm/msr.txt diff --git a/Documentation/virtual/kvm/nested-vmx.txt b/Documentation/virt/kvm/nested-vmx.txt index 97eb1353e962..97eb1353e962 100644 --- a/Documentation/virtual/kvm/nested-vmx.txt +++ b/Documentation/virt/kvm/nested-vmx.txt diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virt/kvm/ppc-pv.txt index e26115ce4258..e26115ce4258 100644 --- a/Documentation/virtual/kvm/ppc-pv.txt +++ b/Documentation/virt/kvm/ppc-pv.txt diff --git a/Documentation/virtual/kvm/review-checklist.txt b/Documentation/virt/kvm/review-checklist.txt index a83b27635fdd..499af499e296 100644 --- a/Documentation/virtual/kvm/review-checklist.txt +++ b/Documentation/virt/kvm/review-checklist.txt @@ -7,7 +7,7 @@ Review checklist for kvm patches 2. Patches should be against kvm.git master branch. 3. If the patch introduces or modifies a new userspace API: - - the API must be documented in Documentation/virtual/kvm/api.txt + - the API must be documented in Documentation/virt/kvm/api.txt - the API must be discoverable using KVM_CHECK_EXTENSION 4. New state must include support for save/restore. 
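The review checklist above requires that a new userspace API be discoverable with KVM_CHECK_EXTENSION. As an illustration only (not part of this patch), userspace typically probes a capability on the /dev/kvm file descriptor as below, with ``KVM_CAP_NR_VCPUS`` used purely as an example capability::

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int ret;

        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }

        /* Returns 0 if the extension is unsupported, a positive value otherwise. */
        ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
        printf("KVM_CAP_NR_VCPUS -> %d\n", ret);

        close(kvm);
        return 0;
    }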
diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virt/kvm/s390-diag.txt index 7c52e5f8b210..7c52e5f8b210 100644 --- a/Documentation/virtual/kvm/s390-diag.txt +++ b/Documentation/virt/kvm/s390-diag.txt diff --git a/Documentation/virtual/kvm/timekeeping.txt b/Documentation/virt/kvm/timekeeping.txt index 76808a17ad84..76808a17ad84 100644 --- a/Documentation/virtual/kvm/timekeeping.txt +++ b/Documentation/virt/kvm/timekeeping.txt diff --git a/Documentation/virtual/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst index 5feb3706a7ae..5feb3706a7ae 100644 --- a/Documentation/virtual/kvm/vcpu-requests.rst +++ b/Documentation/virt/kvm/vcpu-requests.rst diff --git a/Documentation/virtual/paravirt_ops.rst b/Documentation/virt/paravirt_ops.rst index 6b789d27cead..6b789d27cead 100644 --- a/Documentation/virtual/paravirt_ops.rst +++ b/Documentation/virt/paravirt_ops.rst diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virt/uml/UserModeLinux-HOWTO.txt index 87b80f589e1c..87b80f589e1c 100644 --- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt +++ b/Documentation/virt/uml/UserModeLinux-HOWTO.txt diff --git a/Documentation/vm/conf.py b/Documentation/vm/conf.py deleted file mode 100644 index 3b0b601af558..000000000000 --- a/Documentation/vm/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "Linux Memory Management Documentation" - -tags.add("subproject") - -latex_documents = [ - ('index', 'memory-management.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst index 7d90964abbb0..710ce1c701bf 100644 --- a/Documentation/vm/hmm.rst +++ b/Documentation/vm/hmm.rst @@ -237,7 +237,7 @@ The usage pattern is:: ret = hmm_range_snapshot(&range); if (ret) { up_read(&mm->mmap_sem); - if (ret == -EAGAIN) { + if (ret == -EBUSY) { /* * No need to check hmm_range_wait_until_valid() return value * on retry we will get proper error with hmm_range_snapshot() diff --git a/Documentation/watchdog/hpwdt.rst b/Documentation/watchdog/hpwdt.rst index c165d92cfd12..c824cd7f6e32 100644 --- a/Documentation/watchdog/hpwdt.rst +++ b/Documentation/watchdog/hpwdt.rst @@ -63,7 +63,7 @@ Last reviewed: 08/20/2018 and loop forever. This is generally not what a watchdog user wants. For those wishing to learn more please see: - Documentation/kdump/kdump.rst + Documentation/admin-guide/kdump/kdump.rst Documentation/admin-guide/kernel-parameters.txt (panic=) Your Linux Distribution specific documentation. 
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py deleted file mode 100644 index 33c5c3142e20..000000000000 --- a/Documentation/x86/conf.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8; mode: python -*- - -project = "X86 architecture specific documentation" - -tags.add("subproject") - -latex_documents = [ - ('index', 'x86.tex', project, - 'The kernel development community', 'manual'), -] diff --git a/MAINTAINERS b/MAINTAINERS index 6fe3462a1f7a..8573884e1893 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -899,7 +899,7 @@ L: linux-iio@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/adc/ad7124.c -F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt +F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml ANALOG DEVICES INC AD7606 DRIVER M: Stefan Popa <stefan.popa@analog.com> @@ -1194,7 +1194,7 @@ F: include/uapi/linux/if_arcnet.h ARM ARCHITECTED TIMER DRIVER M: Mark Rutland <mark.rutland@arm.com> -M: Marc Zyngier <marc.zyngier@arm.com> +M: Marc Zyngier <maz@kernel.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/include/asm/arch_timer.h @@ -2155,10 +2155,12 @@ F: Documentation/devicetree/bindings/arm/realtek.txt ARM/RENESAS ARM64 ARCHITECTURE M: Simon Horman <horms@verge.net.au> +M: Geert Uytterhoeven <geert+renesas@glider.be> M: Magnus Damm <magnus.damm@gmail.com> L: linux-renesas-soc@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next +T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next S: Supported F: arch/arm64/boot/dts/renesas/ F: Documentation/devicetree/bindings/arm/renesas.yaml @@ -2269,10 +2271,12 @@ F: drivers/media/platform/s5p-mfc/ ARM/SHMOBILE ARM ARCHITECTURE M: Simon Horman <horms@verge.net.au> +M: Geert Uytterhoeven <geert+renesas@glider.be> M: Magnus Damm <magnus.damm@gmail.com> L: linux-renesas-soc@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next +T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next S: Supported F: arch/arm/boot/dts/emev2* F: arch/arm/boot/dts/gr-peach* @@ -4190,7 +4194,7 @@ M: Jens Axboe <axboe@kernel.dk> L: cgroups@vger.kernel.org L: linux-block@vger.kernel.org T: git git://git.kernel.dk/linux-block -F: Documentation/cgroup-v1/blkio-controller.rst +F: Documentation/admin-guide/cgroup-v1/blkio-controller.rst F: block/blk-cgroup.c F: include/linux/blk-cgroup.h F: block/blk-throttle.c @@ -4469,7 +4473,7 @@ F: arch/powerpc/platforms/powernv/pci-cxl.c F: drivers/misc/cxl/ F: include/misc/cxl* F: include/uapi/misc/cxl.h -F: Documentation/powerpc/cxl.txt +F: Documentation/powerpc/cxl.rst F: Documentation/ABI/testing/sysfs-class-cxl CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER @@ -4480,7 +4484,7 @@ L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/cxlflash/ F: include/uapi/scsi/cxlflash_ioctl.h -F: Documentation/powerpc/cxlflash.txt +F: Documentation/powerpc/cxlflash.rst CYBERPRO FB DRIVER M: Russell King <linux@armlinux.org.uk> @@ -6336,7 +6340,8 @@ F: Documentation/devicetree/bindings/counter/ftm-quaddec.txt F: drivers/counter/ftm-quaddec.c FLOPPY DRIVER -S: Orphan +M: Denis Efremov <efremov@linux.com> +S: Odd Fixes L: linux-block@vger.kernel.org F: drivers/block/floppy.c @@ -6870,7 +6875,7 @@ R: Sagi Shahar 
<sagis@google.com> R: Jon Olson <jonolson@google.com> L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/google/gve.txt +F: Documentation/networking/device_drivers/google/gve.rst F: drivers/net/ethernet/google GPD POCKET FAN DRIVER @@ -8504,7 +8509,7 @@ S: Obsolete F: include/uapi/linux/ipx.h IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) -M: Marc Zyngier <marc.zyngier@arm.com> +M: Marc Zyngier <maz@kernel.org> S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: Documentation/IRQ-domain.txt @@ -8522,7 +8527,7 @@ F: kernel/irq/ IRQCHIP DRIVERS M: Thomas Gleixner <tglx@linutronix.de> M: Jason Cooper <jason@lakedaemon.net> -M: Marc Zyngier <marc.zyngier@arm.com> +M: Marc Zyngier <maz@kernel.org> L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core @@ -8822,7 +8827,7 @@ L: kvm@vger.kernel.org W: http://www.linux-kvm.org T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git S: Supported -F: Documentation/virtual/kvm/ +F: Documentation/virt/kvm/ F: include/trace/events/kvm.h F: include/uapi/asm-generic/kvm* F: include/uapi/linux/kvm* @@ -8842,10 +8847,10 @@ F: arch/x86/include/asm/svm.h F: arch/x86/kvm/svm.c KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64) -M: Marc Zyngier <marc.zyngier@arm.com> +M: Marc Zyngier <maz@kernel.org> R: James Morse <james.morse@arm.com> -R: Julien Thierry <julien.thierry@arm.com> -R: Suzuki K Pouloze <suzuki.poulose@arm.com> +R: Julien Thierry <julien.thierry.kdev@gmail.com> +R: Suzuki K Poulose <suzuki.poulose@arm.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: kvmarm@lists.cs.columbia.edu T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git @@ -12151,7 +12156,7 @@ M: Thomas Hellstrom <thellstrom@vmware.com> M: "VMware, Inc." 
<pv-drivers@vmware.com> L: virtualization@lists.linux-foundation.org S: Supported -F: Documentation/virtual/paravirt_ops.txt +F: Documentation/virt/paravirt_ops.rst F: arch/*/kernel/paravirt* F: arch/*/include/asm/paravirt*.h F: include/linux/hypervisor.h @@ -12408,7 +12413,7 @@ F: Documentation/PCI/pci-error-recovery.rst F: drivers/pci/pcie/aer.c F: drivers/pci/pcie/dpc.c F: drivers/pci/pcie/err.c -F: Documentation/powerpc/eeh-pci-error-recovery.txt +F: Documentation/powerpc/eeh-pci-error-recovery.rst F: arch/powerpc/kernel/eeh*.c F: arch/powerpc/platforms/*/eeh*.c F: arch/powerpc/include/*/eeh*.h @@ -13739,6 +13744,7 @@ F: drivers/mtd/nand/raw/r852.c F: drivers/mtd/nand/raw/r852.h RISC-V ARCHITECTURE +M: Paul Walmsley <paul.walmsley@sifive.com> M: Palmer Dabbelt <palmer@sifive.com> M: Albert Ou <aou@eecs.berkeley.edu> L: linux-riscv@lists.infradead.org @@ -13961,7 +13967,6 @@ F: drivers/pci/hotplug/s390_pci_hpc.c S390 VFIO-CCW DRIVER M: Cornelia Huck <cohuck@redhat.com> -M: Farhan Ali <alifm@linux.ibm.com> M: Eric Farman <farman@linux.ibm.com> R: Halil Pasic <pasic@linux.ibm.com> L: linux-s390@vger.kernel.org @@ -14030,6 +14035,12 @@ F: drivers/media/common/saa7146/ F: drivers/media/pci/saa7146/ F: include/media/drv-intf/saa7146* +SAFESETID SECURITY MODULE +M: Micah Morton <mortonm@chromium.org> +S: Supported +F: security/safesetid/ +F: Documentation/admin-guide/LSM/SafeSetID.rst + SAMSUNG AUDIO (ASoC) DRIVERS M: Krzysztof Kozlowski <krzk@kernel.org> M: Sangbeom Kim <sbkim73@samsung.com> @@ -16868,7 +16879,7 @@ W: http://user-mode-linux.sourceforge.net Q: https://patchwork.ozlabs.org/project/linux-um/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git S: Maintained -F: Documentation/virtual/uml/ +F: Documentation/virt/uml/ F: arch/um/ F: arch/x86/um/ F: fs/hostfs/ @@ -17137,7 +17148,7 @@ F: drivers/virtio/virtio_input.c F: include/uapi/linux/virtio_input.h VIRTIO IOMMU DRIVER -M: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> +M: Jean-Philippe Brucker <jean-philippe@linaro.org> L: virtualization@lists.linux-foundation.org S: Maintained F: drivers/iommu/virtio-iommu.c @@ -17186,7 +17197,6 @@ F: drivers/vme/ F: include/linux/vme* VMWARE BALLOON DRIVER -M: Julien Freche <jfreche@vmware.com> M: Nadav Amit <namit@vmware.com> M: "VMware, Inc." <pv-drivers@vmware.com> L: linux-kernel@vger.kernel.org @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc3 NAME = Bobtail Squid # *DOCUMENTATION* @@ -472,6 +472,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds KBUILD_LDFLAGS := GCC_PLUGINS_CFLAGS := +CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS @@ -519,7 +520,7 @@ endif ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) ifneq ($(CROSS_COMPILE),) -CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) +CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) @@ -843,6 +844,9 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) # warn about C99 declaration after statement KBUILD_CFLAGS += -Wdeclaration-after-statement +# Warn about unmarked fall-throughs in switch statement. 
+KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=3,) + # Variable Length Arrays (VLAs) should not be used anywhere in the kernel KBUILD_CFLAGS += -Wvla diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index c929bea9a9ff..85710e078afb 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1535,7 +1535,6 @@ config DEBUG_LL_INCLUDE DEBUG_IMX7D_UART default "debug/ks8695.S" if DEBUG_KS8695_UART default "debug/msm.S" if DEBUG_QCOM_UARTDM - default "debug/netx.S" if DEBUG_NETX_UART default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART default "debug/renesas-scif.S" if DEBUG_R7S72100_SCIF2 default "debug/renesas-scif.S" if DEBUG_RCAR_GEN1_SCIF0 @@ -1575,7 +1574,6 @@ config DEBUG_UART_8250 config DEBUG_UART_PHYS hex "Physical base address of debug UART" - default 0x00100a00 if DEBUG_NETX_UART default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0 default 0x01c28000 if DEBUG_SUNXI_UART0 default 0x01c28400 if DEBUG_SUNXI_UART1 @@ -1700,7 +1698,6 @@ config DEBUG_UART_PHYS DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \ DEBUG_LL_UART_EFM32 || \ DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \ - DEBUG_NETX_UART || \ DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \ DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \ DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF1 || \ @@ -1717,7 +1714,6 @@ config DEBUG_UART_VIRT default 0xc881f000 if DEBUG_RV1108_UART2 default 0xc8821000 if DEBUG_RV1108_UART1 default 0xc8912000 if DEBUG_RV1108_UART0 - default 0xe0000a00 if DEBUG_NETX_UART default 0xe0010fe0 if ARCH_RPC default 0xf0000be0 if ARCH_EBSA110 default 0xf0010000 if DEBUG_ASM9260_UART @@ -1822,7 +1818,6 @@ config DEBUG_UART_VIRT default DEBUG_UART_PHYS if !MMU depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \ DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \ - DEBUG_NETX_UART || \ DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \ DEBUG_S3C64XX_UART || \ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \ diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts index 18d0ae46e76c..0faae8950375 100644 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts @@ -124,6 +124,9 @@ }; mdio-bus-mux { + #address-cells = <1>; + #size-cells = <0>; + /* BIT(9) = 1 => external mdio */ mdio_ext: mdio@200 { reg = <0x200>; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index cbe61b61a212..c2a9dd57e56a 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -112,7 +112,7 @@ }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6ul-geam.dts b/arch/arm/boot/dts/imx6ul-geam.dts index 21ddd359d3ed..9f63706383a7 100644 --- a/arch/arm/boot/dts/imx6ul-geam.dts +++ b/arch/arm/boot/dts/imx6ul-geam.dts @@ -156,7 +156,7 @@ }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi index b26d4f57c655..cc9adce638f5 100644 --- a/arch/arm/boot/dts/imx6ul-isiot.dtsi +++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi @@ -148,7 +148,7 @@ }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git 
a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts index 39eeeddac39e..09f7ffa9ad8c 100644 --- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts +++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts @@ -43,7 +43,7 @@ }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6ul-pico-pi.dts b/arch/arm/boot/dts/imx6ul-pico-pi.dts index de07357b27fc..6cd7d5877d20 100644 --- a/arch/arm/boot/dts/imx6ul-pico-pi.dts +++ b/arch/arm/boot/dts/imx6ul-pico-pi.dts @@ -43,7 +43,7 @@ }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; @@ -58,7 +58,7 @@ }; &i2c3 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c3>; status = "okay"; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index 992747a57442..56907bb4b329 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -186,7 +186,7 @@ reg = <0x40330200 0x200>; }; - usbphy1: usb-phy@0x40350000 { + usbphy1: usb-phy@40350000 { compatible = "fsl,imx7ulp-usbphy", "fsl,imx6ul-usbphy"; reg = <0x40350000 0x1000>; interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index e6b98b6eb88d..822cddfbf1af 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -4,17 +4,9 @@ CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y CONFIG_ARCH_U8500=y -CONFIG_MACH_HREFV60=y -CONFIG_MACH_SNOWBALL=y CONFIG_SMP=y CONFIG_NR_CPUS=2 -CONFIG_PREEMPT=y -CONFIG_AEABI=y CONFIG_HIGHMEM=y CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y @@ -25,6 +17,11 @@ CONFIG_CPU_IDLE=y CONFIG_ARM_U8500_CPUIDLE=y CONFIG_VFP=y CONFIG_NEON=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -47,7 +44,6 @@ CONFIG_SMSC911X=y CONFIG_SMSC_PHY=y CONFIG_CW1200=y CONFIG_CW1200_WLAN_SDIO=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GPIO=y @@ -63,7 +59,6 @@ CONFIG_RMI4_CORE=y CONFIG_RMI4_I2C=y CONFIG_RMI4_F11=y # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y @@ -72,6 +67,7 @@ CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_GPIO_STMPE=y CONFIG_GPIO_TC3589X=y +CONFIG_SENSORS_IIO_HWMON=y CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y CONFIG_WATCHDOG=y @@ -79,6 +75,13 @@ CONFIG_MFD_STMPE=y CONFIG_MFD_TC3589X=y CONFIG_REGULATOR_AB8500=y CONFIG_REGULATOR_GPIO=y +CONFIG_DRM=y +CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y +CONFIG_DRM_LIMA=y +CONFIG_DRM_MCDE=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=m +CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y @@ -87,6 +90,7 @@ CONFIG_SND_SOC_UX500_MACH_MOP500=y CONFIG_USB=y CONFIG_USB_MUSB_HDRC=y CONFIG_USB_MUSB_UX500=y +CONFIG_MUSB_PIO_ONLY=y CONFIG_AB8500_USB=y CONFIG_USB_GADGET=y CONFIG_USB_ETH=m @@ -103,6 +107,7 @@ CONFIG_RTC_DRV_AB8500=y CONFIG_RTC_DRV_PL031=y CONFIG_DMADEVICES=y CONFIG_STE_DMA40=y +CONFIG_HWSPINLOCK=y CONFIG_HSEM_U8500=y CONFIG_IIO=y CONFIG_IIO_SW_TRIGGER=y @@ -126,20 +131,19 @@ 
CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y +CONFIG_CRYPTO_DEV_UX500=y +CONFIG_CRYPTO_DEV_UX500_CRYP=y +CONFIG_CRYPTO_DEV_UX500_HASH=y +CONFIG_CRYPTO_DEV_UX500_DEBUG=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y # CONFIG_SCHED_DEBUG is not set -# CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set CONFIG_DEBUG_USER=y CONFIG_CORESIGHT=y CONFIG_CORESIGHT_SINK_TPIU=y CONFIG_CORESIGHT_SINK_ETBV10=y CONFIG_CORESIGHT_SOURCE_ETM3X=y -CONFIG_CRYPTO_DEV_UX500=y -CONFIG_CRYPTO_DEV_UX500_CRYP=y -CONFIG_CRYPTO_DEV_UX500_HASH=y -CONFIG_CRYPTO_DEV_UX500_DEBUG=y diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 7e0486ad1318..dba9355e2484 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -18,7 +18,9 @@ extern const struct dma_map_ops arm_coherent_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL; + if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE)) + return &arm_dma_ops; + return NULL; } #ifdef __arch_page_to_dma diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S index 05d03f09ff54..71262dcdbca3 100644 --- a/arch/arm/mach-davinci/sleep.S +++ b/arch/arm/mach-davinci/sleep.S @@ -24,6 +24,7 @@ #define DEEPSLEEP_SLEEPENABLE_BIT BIT(31) .text + .arch armv5te /* * Move DaVinci into deep sleep state * diff --git a/arch/arm/mach-netx/Kconfig b/arch/arm/mach-netx/Kconfig deleted file mode 100644 index 1e5d9c870784..000000000000 --- a/arch/arm/mach-netx/Kconfig +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -menu "NetX Implementations" - depends on ARCH_NETX - -config MACH_NXDKN - bool "Enable Hilscher nxdkn Eval Board support" - help - Board support for the Hilscher NetX Eval Board - -config MACH_NXDB500 - bool "Enable Hilscher nxdb500 Eval Board support" - select ARM_AMBA - help - Board support for the Hilscher nxdb500 Eval Board - -config MACH_NXEB500HMI - bool "Enable Hilscher nxeb500hmi Eval Board support" - select ARM_AMBA - help - Board support for the Hilscher nxeb500hmi Eval Board - -endmenu diff --git a/arch/arm/mach-netx/Makefile b/arch/arm/mach-netx/Makefile deleted file mode 100644 index 44ea83f7d9c2..000000000000 --- a/arch/arm/mach-netx/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the linux kernel. -# - -# Object file lists. 
- -obj-y += time.o generic.o pfifo.o xc.o - -# Specific board support -obj-$(CONFIG_MACH_NXDKN) += nxdkn.o -obj-$(CONFIG_MACH_NXDB500) += nxdb500.o fb.o -obj-$(CONFIG_MACH_NXEB500HMI) += nxeb500hmi.o fb.o diff --git a/arch/arm/mach-netx/Makefile.boot b/arch/arm/mach-netx/Makefile.boot deleted file mode 100644 index 2eb23c0cb6b0..000000000000 --- a/arch/arm/mach-netx/Makefile.boot +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - zreladdr-y += 0x80008000 - diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c deleted file mode 100644 index 2dc80db07390..000000000000 --- a/arch/arm/mach-netx/fb.c +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mach-netx/fb.c - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/device.h> -#include <linux/init.h> -#include <linux/dma-mapping.h> -#include <linux/amba/bus.h> -#include <linux/amba/clcd.h> -#include <linux/err.h> -#include <linux/gfp.h> - -#include <asm/irq.h> - -#include <mach/netx-regs.h> -#include <mach/hardware.h> - -static struct clcd_panel *netx_panel; - -void netx_clcd_enable(struct clcd_fb *fb) -{ -} - -int netx_clcd_setup(struct clcd_fb *fb) -{ - dma_addr_t dma; - - fb->panel = netx_panel; - - fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, 1024 * 1024, &dma, - GFP_KERNEL); - if (!fb->fb.screen_base) { - printk(KERN_ERR "CLCD: unable to map framebuffer\n"); - return -ENOMEM; - } - - fb->fb.fix.smem_start = dma; - fb->fb.fix.smem_len = 1024*1024; - - return 0; -} - -int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) -{ - return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base, - fb->fb.fix.smem_start, fb->fb.fix.smem_len); -} - -void netx_clcd_remove(struct clcd_fb *fb) -{ - dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base, - fb->fb.fix.smem_start); -} - -static AMBA_AHB_DEVICE(fb, "fb", 0, 0x00104000, { NETX_IRQ_LCD }, NULL); - -int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel) -{ - netx_panel = panel; - fb_device.dev.platform_data = board; - return amba_device_register(&fb_device, &iomem_resource); -} diff --git a/arch/arm/mach-netx/fb.h b/arch/arm/mach-netx/fb.h deleted file mode 100644 index 5cdc01fc3c86..000000000000 --- a/arch/arm/mach-netx/fb.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/fb.h - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -void netx_clcd_enable(struct clcd_fb *fb); -int netx_clcd_setup(struct clcd_fb *fb); -int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma); -void netx_clcd_remove(struct clcd_fb *fb); -int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel); diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c deleted file mode 100644 index 88881fd45e9f..000000000000 --- a/arch/arm/mach-netx/generic.c +++ /dev/null @@ -1,182 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mach-netx/generic.c - * - * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/device.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/irqchip/arm-vic.h> -#include <linux/reboot.h> -#include <mach/hardware.h> -#include <asm/mach/map.h> -#include <mach/netx-regs.h> -#include <asm/mach/irq.h> - -static struct map_desc netx_io_desc[] __initdata = { - { - .virtual = 
NETX_IO_VIRT, - .pfn = __phys_to_pfn(NETX_IO_PHYS), - .length = NETX_IO_SIZE, - .type = MT_DEVICE - } -}; - -void __init netx_map_io(void) -{ - iotable_init(netx_io_desc, ARRAY_SIZE(netx_io_desc)); -} - -static struct resource netx_rtc_resources[] = { - [0] = { - .start = 0x00101200, - .end = 0x00101220, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device netx_rtc_device = { - .name = "netx-rtc", - .id = 0, - .num_resources = ARRAY_SIZE(netx_rtc_resources), - .resource = netx_rtc_resources, -}; - -static struct platform_device *devices[] __initdata = { - &netx_rtc_device, -}; - -#if 0 -#define DEBUG_IRQ(fmt...) printk(fmt) -#else -#define DEBUG_IRQ(fmt...) while (0) {} -#endif - -static void netx_hif_demux_handler(struct irq_desc *desc) -{ - unsigned int irq = NETX_IRQ_HIF_CHAINED(0); - unsigned int stat; - - stat = ((readl(NETX_DPMAS_INT_EN) & - readl(NETX_DPMAS_INT_STAT)) >> 24) & 0x1f; - - while (stat) { - if (stat & 1) { - DEBUG_IRQ("handling irq %d\n", irq); - generic_handle_irq(irq); - } - irq++; - stat >>= 1; - } -} - -static int -netx_hif_irq_type(struct irq_data *d, unsigned int type) -{ - unsigned int val, irq; - - val = readl(NETX_DPMAS_IF_CONF1); - - irq = d->irq - NETX_IRQ_HIF_CHAINED(0); - - if (type & IRQ_TYPE_EDGE_RISING) { - DEBUG_IRQ("rising edges\n"); - val |= (1 << 26) << irq; - } - if (type & IRQ_TYPE_EDGE_FALLING) { - DEBUG_IRQ("falling edges\n"); - val &= ~((1 << 26) << irq); - } - if (type & IRQ_TYPE_LEVEL_LOW) { - DEBUG_IRQ("low level\n"); - val &= ~((1 << 26) << irq); - } - if (type & IRQ_TYPE_LEVEL_HIGH) { - DEBUG_IRQ("high level\n"); - val |= (1 << 26) << irq; - } - - writel(val, NETX_DPMAS_IF_CONF1); - - return 0; -} - -static void -netx_hif_ack_irq(struct irq_data *d) -{ - unsigned int val, irq; - - irq = d->irq - NETX_IRQ_HIF_CHAINED(0); - writel((1 << 24) << irq, NETX_DPMAS_INT_STAT); - - val = readl(NETX_DPMAS_INT_EN); - val &= ~((1 << 24) << irq); - writel(val, NETX_DPMAS_INT_EN); - - DEBUG_IRQ("%s: irq %d\n", __func__, d->irq); -} - -static void -netx_hif_mask_irq(struct irq_data *d) -{ - unsigned int val, irq; - - irq = d->irq - NETX_IRQ_HIF_CHAINED(0); - val = readl(NETX_DPMAS_INT_EN); - val &= ~((1 << 24) << irq); - writel(val, NETX_DPMAS_INT_EN); - DEBUG_IRQ("%s: irq %d\n", __func__, d->irq); -} - -static void -netx_hif_unmask_irq(struct irq_data *d) -{ - unsigned int val, irq; - - irq = d->irq - NETX_IRQ_HIF_CHAINED(0); - val = readl(NETX_DPMAS_INT_EN); - val |= (1 << 24) << irq; - writel(val, NETX_DPMAS_INT_EN); - DEBUG_IRQ("%s: irq %d\n", __func__, d->irq); -} - -static struct irq_chip netx_hif_chip = { - .irq_ack = netx_hif_ack_irq, - .irq_mask = netx_hif_mask_irq, - .irq_unmask = netx_hif_unmask_irq, - .irq_set_type = netx_hif_irq_type, -}; - -void __init netx_init_irq(void) -{ - int irq; - - vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0); - - for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { - irq_set_chip_and_handler(irq, &netx_hif_chip, - handle_level_irq); - irq_clear_status_flags(irq, IRQ_NOREQUEST); - } - - writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN); - irq_set_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler); -} - -static int __init netx_init(void) -{ - return platform_add_devices(devices, ARRAY_SIZE(devices)); -} - -subsys_initcall(netx_init); - -void netx_restart(enum reboot_mode mode, const char *cmd) -{ - writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES, - NETX_SYSTEM_RES_CR); -} diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h 
deleted file mode 100644 index 223e304574a5..000000000000 --- a/arch/arm/mach-netx/generic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/generic.h - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/reboot.h> - -extern void __init netx_map_io(void); -extern void __init netx_init_irq(void); -extern void netx_restart(enum reboot_mode, const char *); - -extern void netx_timer_init(void); diff --git a/arch/arm/mach-netx/include/mach/hardware.h b/arch/arm/mach-netx/include/mach/hardware.h deleted file mode 100644 index 84253993d1e0..000000000000 --- a/arch/arm/mach-netx/include/mach/hardware.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/include/mach/hardware.h - * - * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ -#ifndef __ASM_ARCH_HARDWARE_H -#define __ASM_ARCH_HARDWARE_H - -#define NETX_IO_PHYS 0x00100000 -#define NETX_IO_VIRT 0xe0000000 -#define NETX_IO_SIZE 0x00100000 - -#define SRAM_INTERNAL_PHYS_0 0x00000 -#define SRAM_INTERNAL_PHYS_1 0x08000 -#define SRAM_INTERNAL_PHYS_2 0x10000 -#define SRAM_INTERNAL_PHYS_3 0x18000 -#define SRAM_INTERNAL_PHYS(no) ((no) * 0x8000) - -#define XPEC_MEM_SIZE 0x4000 -#define XMAC_MEM_SIZE 0x1000 -#define SRAM_MEM_SIZE 0x8000 - -#define io_p2v(x) IOMEM((x) - NETX_IO_PHYS + NETX_IO_VIRT) -#define io_v2p(x) ((x) - NETX_IO_VIRT + NETX_IO_PHYS) - -#endif diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h deleted file mode 100644 index 540c92104fe8..000000000000 --- a/arch/arm/mach-netx/include/mach/irqs.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/include/mach/irqs.h - * - * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#define NETX_IRQ_VIC_START 64 -#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0) -#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1) -#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2) -#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3) -#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4) -#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5) -#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6) -#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7) -#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8) -#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9) -#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10) -#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11) -#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12) -#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13) -#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14) -#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15) -#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16) -#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17) -#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18) -#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19) -#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20) -#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no)) -#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21) -#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22) -#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23) -#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24) -#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25) -#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26) -/* int 27 is reserved */ -/* int 28 is reserved */ -#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29) -#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30) -/* int 31 is reserved */ - -#define 
NETX_IRQS (NETX_IRQ_VIC_START + 32) - -/* for multiplexed irqs on gpio 0..14 */ -#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x)) -#define NETX_IRQ_GPIO_LAST NETX_IRQ_GPIO(14) - -/* Host interface interrupts */ -#define NETX_IRQ_HIF_CHAINED(x) (NETX_IRQ_GPIO_LAST + 1 + (x)) -#define NETX_IRQ_HIF_PIO35 NETX_IRQ_HIF_CHAINED(0) -#define NETX_IRQ_HIF_PIO36 NETX_IRQ_HIF_CHAINED(1) -#define NETX_IRQ_HIF_PIO40 NETX_IRQ_HIF_CHAINED(2) -#define NETX_IRQ_HIF_PIO47 NETX_IRQ_HIF_CHAINED(3) -#define NETX_IRQ_HIF_PIO72 NETX_IRQ_HIF_CHAINED(4) -#define NETX_IRQ_HIF_LAST NETX_IRQ_HIF_CHAINED(4) - -#define NR_IRQS (NETX_IRQ_HIF_LAST + 1) diff --git a/arch/arm/mach-netx/include/mach/netx-regs.h b/arch/arm/mach-netx/include/mach/netx-regs.h deleted file mode 100644 index 7c356a6ab80b..000000000000 --- a/arch/arm/mach-netx/include/mach/netx-regs.h +++ /dev/null @@ -1,420 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/include/mach/netx-regs.h - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#ifndef __ASM_ARCH_NETX_REGS_H -#define __ASM_ARCH_NETX_REGS_H - -/* offsets relative to the beginning of the io space */ -#define NETX_OFS_SYSTEM 0x00000 -#define NETX_OFS_MEMCR 0x00100 -#define NETX_OFS_DPMAS 0x03000 -#define NETX_OFS_GPIO 0x00800 -#define NETX_OFS_PIO 0x00900 -#define NETX_OFS_UART0 0x00a00 -#define NETX_OFS_UART1 0x00a40 -#define NETX_OFS_UART2 0x00a80 -#define NETX_OF_MIIMU 0x00b00 -#define NETX_OFS_SPI 0x00c00 -#define NETX_OFS_I2C 0x00d00 -#define NETX_OFS_SYSTIME 0x01100 -#define NETX_OFS_RTC 0x01200 -#define NETX_OFS_EXTBUS 0x03600 -#define NETX_OFS_LCD 0x04000 -#define NETX_OFS_USB 0x20000 -#define NETX_OFS_XMAC0 0x60000 -#define NETX_OFS_XMAC1 0x61000 -#define NETX_OFS_XMAC2 0x62000 -#define NETX_OFS_XMAC3 0x63000 -#define NETX_OFS_XMAC(no) (0x60000 + (no) * 0x1000) -#define NETX_OFS_PFIFO 0x64000 -#define NETX_OFS_XPEC0 0x70000 -#define NETX_OFS_XPEC1 0x74000 -#define NETX_OFS_XPEC2 0x78000 -#define NETX_OFS_XPEC3 0x7c000 -#define NETX_OFS_XPEC(no) (0x70000 + (no) * 0x4000) -#define NETX_OFS_VIC 0xff000 - -/* physical addresses */ -#define NETX_PA_SYSTEM (NETX_IO_PHYS + NETX_OFS_SYSTEM) -#define NETX_PA_MEMCR (NETX_IO_PHYS + NETX_OFS_MEMCR) -#define NETX_PA_DPMAS (NETX_IO_PHYS + NETX_OFS_DPMAS) -#define NETX_PA_GPIO (NETX_IO_PHYS + NETX_OFS_GPIO) -#define NETX_PA_PIO (NETX_IO_PHYS + NETX_OFS_PIO) -#define NETX_PA_UART0 (NETX_IO_PHYS + NETX_OFS_UART0) -#define NETX_PA_UART1 (NETX_IO_PHYS + NETX_OFS_UART1) -#define NETX_PA_UART2 (NETX_IO_PHYS + NETX_OFS_UART2) -#define NETX_PA_MIIMU (NETX_IO_PHYS + NETX_OF_MIIMU) -#define NETX_PA_SPI (NETX_IO_PHYS + NETX_OFS_SPI) -#define NETX_PA_I2C (NETX_IO_PHYS + NETX_OFS_I2C) -#define NETX_PA_SYSTIME (NETX_IO_PHYS + NETX_OFS_SYSTIME) -#define NETX_PA_RTC (NETX_IO_PHYS + NETX_OFS_RTC) -#define NETX_PA_EXTBUS (NETX_IO_PHYS + NETX_OFS_EXTBUS) -#define NETX_PA_LCD (NETX_IO_PHYS + NETX_OFS_LCD) -#define NETX_PA_USB (NETX_IO_PHYS + NETX_OFS_USB) -#define NETX_PA_XMAC0 (NETX_IO_PHYS + NETX_OFS_XMAC0) -#define NETX_PA_XMAC1 (NETX_IO_PHYS + NETX_OFS_XMAC1) -#define NETX_PA_XMAC2 (NETX_IO_PHYS + NETX_OFS_XMAC2) -#define NETX_PA_XMAC3 (NETX_IO_PHYS + NETX_OFS_XMAC3) -#define NETX_PA_XMAC(no) (NETX_IO_PHYS + NETX_OFS_XMAC(no)) -#define NETX_PA_PFIFO (NETX_IO_PHYS + NETX_OFS_PFIFO) -#define NETX_PA_XPEC0 (NETX_IO_PHYS + NETX_OFS_XPEC0) -#define NETX_PA_XPEC1 (NETX_IO_PHYS + NETX_OFS_XPEC1) -#define NETX_PA_XPEC2 (NETX_IO_PHYS + NETX_OFS_XPEC2) -#define NETX_PA_XPEC3 (NETX_IO_PHYS + NETX_OFS_XPEC3) -#define 
NETX_PA_XPEC(no) (NETX_IO_PHYS + NETX_OFS_XPEC(no)) -#define NETX_PA_VIC (NETX_IO_PHYS + NETX_OFS_VIC) - -/* virtual addresses */ -#define NETX_VA_SYSTEM (NETX_IO_VIRT + NETX_OFS_SYSTEM) -#define NETX_VA_MEMCR (NETX_IO_VIRT + NETX_OFS_MEMCR) -#define NETX_VA_DPMAS (NETX_IO_VIRT + NETX_OFS_DPMAS) -#define NETX_VA_GPIO (NETX_IO_VIRT + NETX_OFS_GPIO) -#define NETX_VA_PIO (NETX_IO_VIRT + NETX_OFS_PIO) -#define NETX_VA_UART0 (NETX_IO_VIRT + NETX_OFS_UART0) -#define NETX_VA_UART1 (NETX_IO_VIRT + NETX_OFS_UART1) -#define NETX_VA_UART2 (NETX_IO_VIRT + NETX_OFS_UART2) -#define NETX_VA_MIIMU (NETX_IO_VIRT + NETX_OF_MIIMU) -#define NETX_VA_SPI (NETX_IO_VIRT + NETX_OFS_SPI) -#define NETX_VA_I2C (NETX_IO_VIRT + NETX_OFS_I2C) -#define NETX_VA_SYSTIME (NETX_IO_VIRT + NETX_OFS_SYSTIME) -#define NETX_VA_RTC (NETX_IO_VIRT + NETX_OFS_RTC) -#define NETX_VA_EXTBUS (NETX_IO_VIRT + NETX_OFS_EXTBUS) -#define NETX_VA_LCD (NETX_IO_VIRT + NETX_OFS_LCD) -#define NETX_VA_USB (NETX_IO_VIRT + NETX_OFS_USB) -#define NETX_VA_XMAC0 (NETX_IO_VIRT + NETX_OFS_XMAC0) -#define NETX_VA_XMAC1 (NETX_IO_VIRT + NETX_OFS_XMAC1) -#define NETX_VA_XMAC2 (NETX_IO_VIRT + NETX_OFS_XMAC2) -#define NETX_VA_XMAC3 (NETX_IO_VIRT + NETX_OFS_XMAC3) -#define NETX_VA_XMAC(no) (NETX_IO_VIRT + NETX_OFS_XMAC(no)) -#define NETX_VA_PFIFO (NETX_IO_VIRT + NETX_OFS_PFIFO) -#define NETX_VA_XPEC0 (NETX_IO_VIRT + NETX_OFS_XPEC0) -#define NETX_VA_XPEC1 (NETX_IO_VIRT + NETX_OFS_XPEC1) -#define NETX_VA_XPEC2 (NETX_IO_VIRT + NETX_OFS_XPEC2) -#define NETX_VA_XPEC3 (NETX_IO_VIRT + NETX_OFS_XPEC3) -#define NETX_VA_XPEC(no) (NETX_IO_VIRT + NETX_OFS_XPEC(no)) -#define NETX_VA_VIC (NETX_IO_VIRT + NETX_OFS_VIC) - -/********************************* - * System functions * - *********************************/ - -/* Registers */ -#define NETX_SYSTEM_REG(ofs) IOMEM(NETX_VA_SYSTEM + (ofs)) -#define NETX_SYSTEM_BOO_SR NETX_SYSTEM_REG(0x00) -#define NETX_SYSTEM_IOC_CR NETX_SYSTEM_REG(0x04) -#define NETX_SYSTEM_IOC_MR NETX_SYSTEM_REG(0x08) - -/* FIXME: Docs are not consistent */ -/* #define NETX_SYSTEM_RES_CR NETX_SYSTEM_REG(0x08) */ -#define NETX_SYSTEM_RES_CR NETX_SYSTEM_REG(0x0c) - -#define NETX_SYSTEM_PHY_CONTROL NETX_SYSTEM_REG(0x10) -#define NETX_SYSTEM_REV NETX_SYSTEM_REG(0x34) -#define NETX_SYSTEM_IOC_ACCESS_KEY NETX_SYSTEM_REG(0x70) -#define NETX_SYSTEM_WDG_TR NETX_SYSTEM_REG(0x200) -#define NETX_SYSTEM_WDG_CTR NETX_SYSTEM_REG(0x204) -#define NETX_SYSTEM_WDG_IRQ_TIMEOUT NETX_SYSTEM_REG(0x208) -#define NETX_SYSTEM_WDG_RES_TIMEOUT NETX_SYSTEM_REG(0x20c) - -/* Bits */ -#define NETX_SYSTEM_RES_CR_RSTIN (1<<0) -#define NETX_SYSTEM_RES_CR_WDG_RES (1<<1) -#define NETX_SYSTEM_RES_CR_HOST_RES (1<<2) -#define NETX_SYSTEM_RES_CR_FIRMW_RES (1<<3) -#define NETX_SYSTEM_RES_CR_XPEC0_RES (1<<4) -#define NETX_SYSTEM_RES_CR_XPEC1_RES (1<<5) -#define NETX_SYSTEM_RES_CR_XPEC2_RES (1<<6) -#define NETX_SYSTEM_RES_CR_XPEC3_RES (1<<7) -#define NETX_SYSTEM_RES_CR_DIS_XPEC0_RES (1<<16) -#define NETX_SYSTEM_RES_CR_DIS_XPEC1_RES (1<<17) -#define NETX_SYSTEM_RES_CR_DIS_XPEC2_RES (1<<18) -#define NETX_SYSTEM_RES_CR_DIS_XPEC3_RES (1<<19) -#define NETX_SYSTEM_RES_CR_FIRMW_FLG0 (1<<20) -#define NETX_SYSTEM_RES_CR_FIRMW_FLG1 (1<<21) -#define NETX_SYSTEM_RES_CR_FIRMW_FLG2 (1<<22) -#define NETX_SYSTEM_RES_CR_FIRMW_FLG3 (1<<23) -#define NETX_SYSTEM_RES_CR_FIRMW_RES_EN (1<<24) -#define NETX_SYSTEM_RES_CR_RSTOUT (1<<25) -#define NETX_SYSTEM_RES_CR_EN_RSTOUT (1<<26) - -#define PHY_CONTROL_RESET (1<<31) -#define PHY_CONTROL_SIM_BYP (1<<30) -#define PHY_CONTROL_CLK_XLATIN (1<<29) -#define PHY_CONTROL_PHY1_EN 
(1<<21) -#define PHY_CONTROL_PHY1_NP_MSG_CODE -#define PHY_CONTROL_PHY1_AUTOMDIX (1<<17) -#define PHY_CONTROL_PHY1_FIXMODE (1<<16) -#define PHY_CONTROL_PHY1_MODE(mode) (((mode) & 0x7) << 13) -#define PHY_CONTROL_PHY0_EN (1<<12) -#define PHY_CONTROL_PHY0_NP_MSG_CODE -#define PHY_CONTROL_PHY0_AUTOMDIX (1<<8) -#define PHY_CONTROL_PHY0_FIXMODE (1<<7) -#define PHY_CONTROL_PHY0_MODE(mode) (((mode) & 0x7) << 4) -#define PHY_CONTROL_PHY_ADDRESS(adr) ((adr) & 0xf) - -#define PHY_MODE_10BASE_T_HALF 0 -#define PHY_MODE_10BASE_T_FULL 1 -#define PHY_MODE_100BASE_TX_FX_FULL 2 -#define PHY_MODE_100BASE_TX_FX_HALF 3 -#define PHY_MODE_100BASE_TX_HALF 4 -#define PHY_MODE_REPEATER 5 -#define PHY_MODE_POWER_DOWN 6 -#define PHY_MODE_ALL 7 - -/* Bits */ -#define VECT_CNTL_ENABLE (1 << 5) - -/******************************* - * GPIO and timer module * - *******************************/ - -/* Registers */ -#define NETX_GPIO_REG(ofs) IOMEM(NETX_VA_GPIO + (ofs)) -#define NETX_GPIO_CFG(gpio) NETX_GPIO_REG(0x0 + ((gpio)<<2)) -#define NETX_GPIO_THRESHOLD_CAPTURE(gpio) NETX_GPIO_REG(0x40 + ((gpio)<<2)) -#define NETX_GPIO_COUNTER_CTRL(counter) NETX_GPIO_REG(0x80 + ((counter)<<2)) -#define NETX_GPIO_COUNTER_MAX(counter) NETX_GPIO_REG(0x94 + ((counter)<<2)) -#define NETX_GPIO_COUNTER_CURRENT(counter) NETX_GPIO_REG(0xa8 + ((counter)<<2)) -#define NETX_GPIO_IRQ_ENABLE NETX_GPIO_REG(0xbc) -#define NETX_GPIO_IRQ_DISABLE NETX_GPIO_REG(0xc0) -#define NETX_GPIO_SYSTIME_NS_CMP NETX_GPIO_REG(0xc4) -#define NETX_GPIO_LINE NETX_GPIO_REG(0xc8) -#define NETX_GPIO_IRQ NETX_GPIO_REG(0xd0) - -/* Bits */ -#define NETX_GPIO_CFG_IOCFG_GP_INPUT (0x0) -#define NETX_GPIO_CFG_IOCFG_GP_OUTPUT (0x1) -#define NETX_GPIO_CFG_IOCFG_GP_UART (0x2) -#define NETX_GPIO_CFG_INV (1<<2) -#define NETX_GPIO_CFG_MODE_INPUT_READ (0<<3) -#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_CONT_RISING (1<<3) -#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_ONCE_RISING (2<<3) -#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_HIGH_LEVEL (3<<3) -#define NETX_GPIO_CFG_COUNT_REF_COUNTER0 (0<<5) -#define NETX_GPIO_CFG_COUNT_REF_COUNTER1 (1<<5) -#define NETX_GPIO_CFG_COUNT_REF_COUNTER2 (2<<5) -#define NETX_GPIO_CFG_COUNT_REF_COUNTER3 (3<<5) -#define NETX_GPIO_CFG_COUNT_REF_COUNTER4 (4<<5) -#define NETX_GPIO_CFG_COUNT_REF_SYSTIME (7<<5) - -#define NETX_GPIO_COUNTER_CTRL_RUN (1<<0) -#define NETX_GPIO_COUNTER_CTRL_SYM (1<<1) -#define NETX_GPIO_COUNTER_CTRL_ONCE (1<<2) -#define NETX_GPIO_COUNTER_CTRL_IRQ_EN (1<<3) -#define NETX_GPIO_COUNTER_CTRL_CNT_EVENT (1<<4) -#define NETX_GPIO_COUNTER_CTRL_RST_EN (1<<5) -#define NETX_GPIO_COUNTER_CTRL_SEL_EVENT (1<<6) -#define NETX_GPIO_COUNTER_CTRL_GPIO_REF /* FIXME */ - -#define GPIO_BIT(gpio) (1<<(gpio)) -#define COUNTER_BIT(counter) ((1<<16)<<(counter)) - -/******************************* - * PIO * - *******************************/ - -/* Registers */ -#define NETX_PIO_REG(ofs) IOMEM(NETX_VA_PIO + (ofs)) -#define NETX_PIO_INPIO NETX_PIO_REG(0x0) -#define NETX_PIO_OUTPIO NETX_PIO_REG(0x4) -#define NETX_PIO_OEPIO NETX_PIO_REG(0x8) - -/******************************* - * MII Unit * - *******************************/ - -/* Registers */ -#define NETX_MIIMU IOMEM(NETX_VA_MIIMU) - -/* Bits */ -#define MIIMU_SNRDY (1<<0) -#define MIIMU_PREAMBLE (1<<1) -#define MIIMU_OPMODE_WRITE (1<<2) -#define MIIMU_MDC_PERIOD (1<<3) -#define MIIMU_PHY_NRES (1<<4) -#define MIIMU_RTA (1<<5) -#define MIIMU_REGADDR(adr) (((adr) & 0x1f) << 6) -#define MIIMU_PHYADDR(adr) (((adr) & 0x1f) << 11) -#define MIIMU_DATA(data) (((data) & 0xffff) << 16) - -/******************************* - * 
xmac / xpec * - *******************************/ - -/* XPEC register offsets relative to NETX_VA_XPEC(no) */ -#define NETX_XPEC_R0_OFS 0x00 -#define NETX_XPEC_R1_OFS 0x04 -#define NETX_XPEC_R2_OFS 0x08 -#define NETX_XPEC_R3_OFS 0x0c -#define NETX_XPEC_R4_OFS 0x10 -#define NETX_XPEC_R5_OFS 0x14 -#define NETX_XPEC_R6_OFS 0x18 -#define NETX_XPEC_R7_OFS 0x1c -#define NETX_XPEC_RANGE01_OFS 0x20 -#define NETX_XPEC_RANGE23_OFS 0x24 -#define NETX_XPEC_RANGE45_OFS 0x28 -#define NETX_XPEC_RANGE67_OFS 0x2c -#define NETX_XPEC_PC_OFS 0x48 -#define NETX_XPEC_TIMER_OFS(timer) (0x30 + ((timer)<<2)) -#define NETX_XPEC_IRQ_OFS 0x8c -#define NETX_XPEC_SYSTIME_NS_OFS 0x90 -#define NETX_XPEC_FIFO_DATA_OFS 0x94 -#define NETX_XPEC_SYSTIME_S_OFS 0x98 -#define NETX_XPEC_ADC_OFS 0x9c -#define NETX_XPEC_URX_COUNT_OFS 0x40 -#define NETX_XPEC_UTX_COUNT_OFS 0x44 -#define NETX_XPEC_PC_OFS 0x48 -#define NETX_XPEC_ZERO_OFS 0x4c -#define NETX_XPEC_STATCFG_OFS 0x50 -#define NETX_XPEC_EC_MASKA_OFS 0x54 -#define NETX_XPEC_EC_MASKB_OFS 0x58 -#define NETX_XPEC_EC_MASK0_OFS 0x5c -#define NETX_XPEC_EC_MASK8_OFS 0x7c -#define NETX_XPEC_EC_MASK9_OFS 0x80 -#define NETX_XPEC_XPU_HOLD_PC_OFS 0x100 -#define NETX_XPEC_RAM_START_OFS 0x2000 - -/* Bits */ -#define XPU_HOLD_PC (1<<0) - -/* XMAC register offsets relative to NETX_VA_XMAC(no) */ -#define NETX_XMAC_RPU_PROGRAM_START_OFS 0x000 -#define NETX_XMAC_RPU_PROGRAM_END_OFS 0x3ff -#define NETX_XMAC_TPU_PROGRAM_START_OFS 0x400 -#define NETX_XMAC_TPU_PROGRAM_END_OFS 0x7ff -#define NETX_XMAC_RPU_HOLD_PC_OFS 0xa00 -#define NETX_XMAC_TPU_HOLD_PC_OFS 0xa04 -#define NETX_XMAC_STATUS_SHARED0_OFS 0x840 -#define NETX_XMAC_CONFIG_SHARED0_OFS 0x844 -#define NETX_XMAC_STATUS_SHARED1_OFS 0x848 -#define NETX_XMAC_CONFIG_SHARED1_OFS 0x84c -#define NETX_XMAC_STATUS_SHARED2_OFS 0x850 -#define NETX_XMAC_CONFIG_SHARED2_OFS 0x854 -#define NETX_XMAC_STATUS_SHARED3_OFS 0x858 -#define NETX_XMAC_CONFIG_SHARED3_OFS 0x85c - -#define RPU_HOLD_PC (1<<15) -#define TPU_HOLD_PC (1<<15) - -/******************************* - * Pointer FIFO * - *******************************/ - -/* Registers */ -#define NETX_PFIFO_REG(ofs) IOMEM(NETX_VA_PFIFO + (ofs)) -#define NETX_PFIFO_BASE(pfifo) NETX_PFIFO_REG(0x00 + ((pfifo)<<2)) -#define NETX_PFIFO_BORDER_BASE(pfifo) NETX_PFIFO_REG(0x80 + ((pfifo)<<2)) -#define NETX_PFIFO_RESET NETX_PFIFO_REG(0x100) -#define NETX_PFIFO_FULL NETX_PFIFO_REG(0x104) -#define NETX_PFIFO_EMPTY NETX_PFIFO_REG(0x108) -#define NETX_PFIFO_OVEFLOW NETX_PFIFO_REG(0x10c) -#define NETX_PFIFO_UNDERRUN NETX_PFIFO_REG(0x110) -#define NETX_PFIFO_FILL_LEVEL(pfifo) NETX_PFIFO_REG(0x180 + ((pfifo)<<2)) -#define NETX_PFIFO_XPEC_ISR(xpec) NETX_PFIFO_REG(0x400 + ((xpec) << 2)) - - -/******************************* - * Memory Controller * - *******************************/ - -/* Registers */ -#define NETX_MEMCR_REG(ofs) IOMEM(NETX_VA_MEMCR + (ofs)) -#define NETX_MEMCR_SRAM_CTRL(cs) NETX_MEMCR_REG(0x0 + 4 * (cs)) /* SRAM for CS 0..2 */ -#define NETX_MEMCR_SDRAM_CFG_CTRL NETX_MEMCR_REG(0x40) -#define NETX_MEMCR_SDRAM_TIMING_CTRL NETX_MEMCR_REG(0x44) -#define NETX_MEMCR_SDRAM_MODE NETX_MEMCR_REG(0x48) -#define NETX_MEMCR_SDRAM_EXT_MODE NETX_MEMCR_REG(0x4c) -#define NETX_MEMCR_PRIO_TIMESLOT_CTRL NETX_MEMCR_REG(0x80) -#define NETX_MEMCR_PRIO_ACCESS_CTRL NETX_MEMCR_REG(0x84) - -/* Bits */ -#define NETX_MEMCR_SRAM_CTRL_WIDTHEXTMEM(x) (((x) & 0x3) << 24) -#define NETX_MEMCR_SRAM_CTRL_WSPOSTPAUSEEXTMEM(x) (((x) & 0x3) << 16) -#define NETX_MEMCR_SRAM_CTRL_WSPREPASEEXTMEM(x) (((x) & 0x3) << 8) -#define 
NETX_MEMCR_SRAM_CTRL_WSEXTMEM(x) (((x) & 0x1f) << 0) - - -/******************************* - * Dual Port Memory * - *******************************/ - -/* Registers */ -#define NETX_DPMAS_REG(ofs) IOMEM(NETX_VA_DPMAS + (ofs)) -#define NETX_DPMAS_SYS_STAT NETX_DPMAS_REG(0x4d8) -#define NETX_DPMAS_INT_STAT NETX_DPMAS_REG(0x4e0) -#define NETX_DPMAS_INT_EN NETX_DPMAS_REG(0x4f0) -#define NETX_DPMAS_IF_CONF0 NETX_DPMAS_REG(0x608) -#define NETX_DPMAS_IF_CONF1 NETX_DPMAS_REG(0x60c) -#define NETX_DPMAS_EXT_CONFIG(cs) NETX_DPMAS_REG(0x610 + 4 * (cs)) -#define NETX_DPMAS_IO_MODE0 NETX_DPMAS_REG(0x620) /* I/O 32..63 */ -#define NETX_DPMAS_DRV_EN0 NETX_DPMAS_REG(0x624) -#define NETX_DPMAS_DATA0 NETX_DPMAS_REG(0x628) -#define NETX_DPMAS_IO_MODE1 NETX_DPMAS_REG(0x630) /* I/O 64..84 */ -#define NETX_DPMAS_DRV_EN1 NETX_DPMAS_REG(0x634) -#define NETX_DPMAS_DATA1 NETX_DPMAS_REG(0x638) - -/* Bits */ -#define NETX_DPMAS_INT_EN_GLB_EN (1<<31) -#define NETX_DPMAS_INT_EN_MEM_LCK (1<<30) -#define NETX_DPMAS_INT_EN_WDG (1<<29) -#define NETX_DPMAS_INT_EN_PIO72 (1<<28) -#define NETX_DPMAS_INT_EN_PIO47 (1<<27) -#define NETX_DPMAS_INT_EN_PIO40 (1<<26) -#define NETX_DPMAS_INT_EN_PIO36 (1<<25) -#define NETX_DPMAS_INT_EN_PIO35 (1<<24) - -#define NETX_DPMAS_IF_CONF0_HIF_DISABLED (0<<28) -#define NETX_DPMAS_IF_CONF0_HIF_EXT_BUS (1<<28) -#define NETX_DPMAS_IF_CONF0_HIF_UP_8BIT (2<<28) -#define NETX_DPMAS_IF_CONF0_HIF_UP_16BIT (3<<28) -#define NETX_DPMAS_IF_CONF0_HIF_IO (4<<28) -#define NETX_DPMAS_IF_CONF0_WAIT_DRV_PP (1<<14) -#define NETX_DPMAS_IF_CONF0_WAIT_DRV_OD (2<<14) -#define NETX_DPMAS_IF_CONF0_WAIT_DRV_TRI (3<<14) - -#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO35 (1<<26) -#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO36 (1<<27) -#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO40 (1<<28) -#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO47 (1<<29) -#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO72 (1<<30) - -#define NETX_EXT_CONFIG_TALEWIDTH(x) (((x) & 0x7) << 29) -#define NETX_EXT_CONFIG_TADRHOLD(x) (((x) & 0x7) << 26) -#define NETX_EXT_CONFIG_TCSON(x) (((x) & 0x7) << 23) -#define NETX_EXT_CONFIG_TRDON(x) (((x) & 0x7) << 20) -#define NETX_EXT_CONFIG_TWRON(x) (((x) & 0x7) << 17) -#define NETX_EXT_CONFIG_TWROFF(x) (((x) & 0x1f) << 12) -#define NETX_EXT_CONFIG_TRDWRCYC(x) (((x) & 0x1f) << 7) -#define NETX_EXT_CONFIG_WAIT_POL (1<<6) -#define NETX_EXT_CONFIG_WAIT_EN (1<<5) -#define NETX_EXT_CONFIG_NRD_MODE (1<<4) -#define NETX_EXT_CONFIG_DS_MODE (1<<3) -#define NETX_EXT_CONFIG_NWR_MODE (1<<2) -#define NETX_EXT_CONFIG_16BIT (1<<1) -#define NETX_EXT_CONFIG_CS_ENABLE (1<<0) - -#define NETX_DPMAS_IO_MODE0_WRL (1<<13) -#define NETX_DPMAS_IO_MODE0_WAIT (1<<14) -#define NETX_DPMAS_IO_MODE0_READY (1<<15) -#define NETX_DPMAS_IO_MODE0_CS0 (1<<19) -#define NETX_DPMAS_IO_MODE0_EXTRD (1<<20) - -#define NETX_DPMAS_IO_MODE1_CS2 (1<<15) -#define NETX_DPMAS_IO_MODE1_CS1 (1<<16) -#define NETX_DPMAS_IO_MODE1_SAMPLE_NPOR (0<<30) -#define NETX_DPMAS_IO_MODE1_SAMPLE_100MHZ (1<<30) -#define NETX_DPMAS_IO_MODE1_SAMPLE_NPIO36 (2<<30) -#define NETX_DPMAS_IO_MODE1_SAMPLE_PIO36 (3<<30) - -/******************************* - * I2C * - *******************************/ -#define NETX_I2C_REG(ofs) IOMEM(NETX_VA_I2C, (ofs)) -#define NETX_I2C_CTRL NETX_I2C_REG(0x0) -#define NETX_I2C_DATA NETX_I2C_REG(0x4) - -#endif /* __ASM_ARCH_NETX_REGS_H */ diff --git a/arch/arm/mach-netx/include/mach/pfifo.h b/arch/arm/mach-netx/include/mach/pfifo.h deleted file mode 100644 index de23180bc937..000000000000 --- a/arch/arm/mach-netx/include/mach/pfifo.h +++ /dev/null @@ -1,42 +0,0 @@ -/* 
SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/include/mach/pfifo.h - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - - -#ifndef ASM_ARCH_PFIFO_H -#define ASM_ARCH_PFIFO_H - -static inline int pfifo_push(int no, unsigned int pointer) -{ - writel(pointer, NETX_PFIFO_BASE(no)); - return 0; -} - -static inline unsigned int pfifo_pop(int no) -{ - return readl(NETX_PFIFO_BASE(no)); -} - -static inline int pfifo_fill_level(int no) -{ - - return readl(NETX_PFIFO_FILL_LEVEL(no)); -} - -static inline int pfifo_full(int no) -{ - return readl(NETX_PFIFO_FULL) & (1<<no) ? 1 : 0; -} - -static inline int pfifo_empty(int no) -{ - return readl(NETX_PFIFO_EMPTY) & (1<<no) ? 1 : 0; -} - -int pfifo_request(unsigned int pfifo_mask); -void pfifo_free(unsigned int pfifo_mask); - -#endif /* ASM_ARCH_PFIFO_H */ diff --git a/arch/arm/mach-netx/include/mach/uncompress.h b/arch/arm/mach-netx/include/mach/uncompress.h deleted file mode 100644 index edc1ac997eab..000000000000 --- a/arch/arm/mach-netx/include/mach/uncompress.h +++ /dev/null @@ -1,63 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/include/mach/uncompress.h - * - * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -/* - * The following code assumes the serial port has already been - * initialized by the bootloader. We search for the first enabled - * port in the most probable order. If you didn't setup a port in - * your bootloader then nothing will appear (which might be desired). - * - * This does not append a newline - */ - -#define REG(x) (*(volatile unsigned long *)(x)) - -#define UART1_BASE 0x100a00 -#define UART2_BASE 0x100a80 - -#define UART_DR 0x0 - -#define UART_CR 0x14 -#define CR_UART_EN (1<<0) - -#define UART_FR 0x18 -#define FR_BUSY (1<<3) -#define FR_TXFF (1<<5) - -static inline void putc(char c) -{ - unsigned long base; - - if (REG(UART1_BASE + UART_CR) & CR_UART_EN) - base = UART1_BASE; - else if (REG(UART2_BASE + UART_CR) & CR_UART_EN) - base = UART2_BASE; - else - return; - - while (REG(base + UART_FR) & FR_TXFF); - REG(base + UART_DR) = c; -} - -static inline void flush(void) -{ - unsigned long base; - - if (REG(UART1_BASE + UART_CR) & CR_UART_EN) - base = UART1_BASE; - else if (REG(UART2_BASE + UART_CR) & CR_UART_EN) - base = UART2_BASE; - else - return; - - while (REG(base + UART_FR) & FR_BUSY); -} - -/* - * nothing to do - */ -#define arch_decomp_setup() diff --git a/arch/arm/mach-netx/include/mach/xc.h b/arch/arm/mach-netx/include/mach/xc.h deleted file mode 100644 index 465d5e250ab8..000000000000 --- a/arch/arm/mach-netx/include/mach/xc.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * arch/arm/mach-netx/include/mach/xc.h - * - * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#ifndef __ASM_ARCH_XC_H -#define __ASM_ARCH_XC_H - -struct xc { - int no; - unsigned int type; - unsigned int version; - void __iomem *xpec_base; - void __iomem *xmac_base; - void __iomem *sram_base; - int irq; - struct device *dev; -}; - -int xc_reset(struct xc *x); -int xc_stop(struct xc* x); -int xc_start(struct xc *x); -int xc_running(struct xc *x); -int xc_request_firmware(struct xc* x); -struct xc* request_xc(int xcno, struct device *dev); -void free_xc(struct xc *x); - -#endif /* __ASM_ARCH_XC_H */ diff --git a/arch/arm/mach-netx/nxdb500.c b/arch/arm/mach-netx/nxdb500.c deleted file mode 100644 index ad5e6747b834..000000000000 --- a/arch/arm/mach-netx/nxdb500.c +++ 
/dev/null @@ -1,197 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mach-netx/nxdb500.c - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/dma-mapping.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/mtd/plat-ram.h> -#include <linux/platform_device.h> -#include <linux/amba/bus.h> -#include <linux/amba/clcd.h> - -#include <mach/hardware.h> -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include <mach/netx-regs.h> -#include <linux/platform_data/eth-netx.h> - -#include "generic.h" -#include "fb.h" - -static struct clcd_panel qvga = { - .mode = { - .name = "QVGA", - .refresh = 60, - .xres = 240, - .yres = 320, - .pixclock = 187617, - .left_margin = 6, - .right_margin = 26, - .upper_margin = 0, - .lower_margin = 6, - .hsync_len = 6, - .vsync_len = 1, - .sync = 0, - .vmode = FB_VMODE_NONINTERLACED, - }, - .width = -1, - .height = -1, - .tim2 = 16, - .cntl = CNTL_LCDTFT | CNTL_BGR, - .bpp = 16, - .grayscale = 0, -}; - -static inline int nxdb500_check(struct clcd_fb *fb, struct fb_var_screeninfo *var) -{ - var->green.length = 5; - var->green.msb_right = 0; - - return clcdfb_check(fb, var); -} - -static int nxdb500_clcd_setup(struct clcd_fb *fb) -{ - unsigned int val; - - fb->fb.var.green.length = 5; - fb->fb.var.green.msb_right = 0; - - /* enable asic control */ - val = readl(NETX_SYSTEM_IOC_ACCESS_KEY); - writel(val, NETX_SYSTEM_IOC_ACCESS_KEY); - - writel(3, NETX_SYSTEM_IOC_CR); - - val = readl(NETX_PIO_OUTPIO); - writel(val | 1, NETX_PIO_OUTPIO); - - val = readl(NETX_PIO_OEPIO); - writel(val | 1, NETX_PIO_OEPIO); - return netx_clcd_setup(fb); -} - -static struct clcd_board clcd_data = { - .name = "netX", - .check = nxdb500_check, - .decode = clcdfb_decode, - .enable = netx_clcd_enable, - .setup = nxdb500_clcd_setup, - .mmap = netx_clcd_mmap, - .remove = netx_clcd_remove, -}; - -static struct netxeth_platform_data eth0_platform_data = { - .xcno = 0, -}; - -static struct platform_device netx_eth0_device = { - .name = "netx-eth", - .id = 0, - .num_resources = 0, - .resource = NULL, - .dev = { - .platform_data = ð0_platform_data, - } -}; - -static struct netxeth_platform_data eth1_platform_data = { - .xcno = 1, -}; - -static struct platform_device netx_eth1_device = { - .name = "netx-eth", - .id = 1, - .num_resources = 0, - .resource = NULL, - .dev = { - .platform_data = ð1_platform_data, - } -}; - -static struct resource netx_uart0_resources[] = { - [0] = { - .start = 0x00100A00, - .end = 0x00100A3F, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = (NETX_IRQ_UART0), - .end = (NETX_IRQ_UART0), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device netx_uart0_device = { - .name = "netx-uart", - .id = 0, - .num_resources = ARRAY_SIZE(netx_uart0_resources), - .resource = netx_uart0_resources, -}; - -static struct resource netx_uart1_resources[] = { - [0] = { - .start = 0x00100A40, - .end = 0x00100A7F, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = (NETX_IRQ_UART1), - .end = (NETX_IRQ_UART1), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device netx_uart1_device = { - .name = "netx-uart", - .id = 1, - .num_resources = ARRAY_SIZE(netx_uart1_resources), - .resource = netx_uart1_resources, -}; - -static struct resource netx_uart2_resources[] = { - [0] = { - .start = 0x00100A80, - .end = 0x00100ABF, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = (NETX_IRQ_UART2), - .end = (NETX_IRQ_UART2), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct 
platform_device netx_uart2_device = { - .name = "netx-uart", - .id = 2, - .num_resources = ARRAY_SIZE(netx_uart2_resources), - .resource = netx_uart2_resources, -}; - -static struct platform_device *devices[] __initdata = { - &netx_eth0_device, - &netx_eth1_device, - &netx_uart0_device, - &netx_uart1_device, - &netx_uart2_device, -}; - -static void __init nxdb500_init(void) -{ - netx_fb_init(&clcd_data, &qvga); - platform_add_devices(devices, ARRAY_SIZE(devices)); -} - -MACHINE_START(NXDB500, "Hilscher nxdb500") - .atag_offset = 0x100, - .map_io = netx_map_io, - .init_irq = netx_init_irq, - .init_time = netx_timer_init, - .init_machine = nxdb500_init, - .restart = netx_restart, -MACHINE_END diff --git a/arch/arm/mach-netx/nxdkn.c b/arch/arm/mach-netx/nxdkn.c deleted file mode 100644 index 917381559628..000000000000 --- a/arch/arm/mach-netx/nxdkn.c +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mach-netx/nxdkn.c - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/dma-mapping.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/mtd/plat-ram.h> -#include <linux/platform_device.h> -#include <linux/amba/bus.h> -#include <linux/amba/clcd.h> - -#include <mach/hardware.h> -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include <mach/netx-regs.h> -#include <linux/platform_data/eth-netx.h> - -#include "generic.h" - -static struct netxeth_platform_data eth0_platform_data = { - .xcno = 0, -}; - -static struct platform_device nxdkn_eth0_device = { - .name = "netx-eth", - .id = 0, - .num_resources = 0, - .resource = NULL, - .dev = { - .platform_data = ð0_platform_data, - } -}; - -static struct netxeth_platform_data eth1_platform_data = { - .xcno = 1, -}; - -static struct platform_device nxdkn_eth1_device = { - .name = "netx-eth", - .id = 1, - .num_resources = 0, - .resource = NULL, - .dev = { - .platform_data = ð1_platform_data, - } -}; - -static struct resource netx_uart0_resources[] = { - [0] = { - .start = 0x00100A00, - .end = 0x00100A3F, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = (NETX_IRQ_UART0), - .end = (NETX_IRQ_UART0), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device netx_uart0_device = { - .name = "netx-uart", - .id = 0, - .num_resources = ARRAY_SIZE(netx_uart0_resources), - .resource = netx_uart0_resources, -}; - -static struct platform_device *devices[] __initdata = { - &nxdkn_eth0_device, - &nxdkn_eth1_device, - &netx_uart0_device, -}; - -static void __init nxdkn_init(void) -{ - platform_add_devices(devices, ARRAY_SIZE(devices)); -} - -MACHINE_START(NXDKN, "Hilscher nxdkn") - .atag_offset = 0x100, - .map_io = netx_map_io, - .init_irq = netx_init_irq, - .init_time = netx_timer_init, - .init_machine = nxdkn_init, - .restart = netx_restart, -MACHINE_END diff --git a/arch/arm/mach-netx/nxeb500hmi.c b/arch/arm/mach-netx/nxeb500hmi.c deleted file mode 100644 index aa0d5b2ca712..000000000000 --- a/arch/arm/mach-netx/nxeb500hmi.c +++ /dev/null @@ -1,174 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mach-netx/nxeb500hmi.c - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/dma-mapping.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/mtd/plat-ram.h> -#include <linux/platform_device.h> -#include <linux/amba/bus.h> -#include <linux/amba/clcd.h> - -#include <mach/hardware.h> -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include 
<mach/netx-regs.h> -#include <linux/platform_data/eth-netx.h> - -#include "generic.h" -#include "fb.h" - -static struct clcd_panel qvga = { - .mode = { - .name = "QVGA", - .refresh = 60, - .xres = 240, - .yres = 320, - .pixclock = 187617, - .left_margin = 6, - .right_margin = 26, - .upper_margin = 0, - .lower_margin = 6, - .hsync_len = 6, - .vsync_len = 1, - .sync = 0, - .vmode = FB_VMODE_NONINTERLACED, - }, - .width = -1, - .height = -1, - .tim2 = 16, - .cntl = CNTL_LCDTFT | CNTL_BGR, - .bpp = 16, - .grayscale = 0, -}; - -static inline int nxeb500hmi_check(struct clcd_fb *fb, struct fb_var_screeninfo *var) -{ - var->green.length = 5; - var->green.msb_right = 0; - - return clcdfb_check(fb, var); -} - -static int nxeb500hmi_clcd_setup(struct clcd_fb *fb) -{ - unsigned int val; - - fb->fb.var.green.length = 5; - fb->fb.var.green.msb_right = 0; - - /* enable asic control */ - val = readl(NETX_SYSTEM_IOC_ACCESS_KEY); - writel(val, NETX_SYSTEM_IOC_ACCESS_KEY); - - writel(3, NETX_SYSTEM_IOC_CR); - - /* GPIO 14 is used for display enable on newer boards */ - writel(9, NETX_GPIO_CFG(14)); - - val = readl(NETX_PIO_OUTPIO); - writel(val | 1, NETX_PIO_OUTPIO); - - val = readl(NETX_PIO_OEPIO); - writel(val | 1, NETX_PIO_OEPIO); - return netx_clcd_setup(fb); -} - -static struct clcd_board clcd_data = { - .name = "netX", - .check = nxeb500hmi_check, - .decode = clcdfb_decode, - .enable = netx_clcd_enable, - .setup = nxeb500hmi_clcd_setup, - .mmap = netx_clcd_mmap, - .remove = netx_clcd_remove, -}; - -static struct netxeth_platform_data eth0_platform_data = { - .xcno = 0, -}; - -static struct platform_device netx_eth0_device = { - .name = "netx-eth", - .id = 0, - .num_resources = 0, - .resource = NULL, - .dev = { - .platform_data = ð0_platform_data, - } -}; - -static struct netxeth_platform_data eth1_platform_data = { - .xcno = 1, -}; - -static struct platform_device netx_eth1_device = { - .name = "netx-eth", - .id = 1, - .num_resources = 0, - .resource = NULL, - .dev = { - .platform_data = ð1_platform_data, - } -}; - -static struct resource netx_cf_resources[] = { - [0] = { - .start = 0x20000000, - .end = 0x25ffffff, - .flags = IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT, - }, -}; - -static struct platform_device netx_cf_device = { - .name = "netx-cf", - .id = 0, - .resource = netx_cf_resources, - .num_resources = ARRAY_SIZE(netx_cf_resources), -}; - -static struct resource netx_uart0_resources[] = { - [0] = { - .start = 0x00100A00, - .end = 0x00100A3F, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = (NETX_IRQ_UART0), - .end = (NETX_IRQ_UART0), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device netx_uart0_device = { - .name = "netx-uart", - .id = 0, - .num_resources = ARRAY_SIZE(netx_uart0_resources), - .resource = netx_uart0_resources, -}; - -static struct platform_device *devices[] __initdata = { - &netx_eth0_device, - &netx_eth1_device, - &netx_cf_device, - &netx_uart0_device, -}; - -static void __init nxeb500hmi_init(void) -{ - netx_fb_init(&clcd_data, &qvga); - platform_add_devices(devices, ARRAY_SIZE(devices)); -} - -MACHINE_START(NXEB500HMI, "Hilscher nxeb500hmi") - .atag_offset = 0x100, - .map_io = netx_map_io, - .init_irq = netx_init_irq, - .init_time = netx_timer_init, - .init_machine = nxeb500hmi_init, - .restart = netx_restart, -MACHINE_END diff --git a/arch/arm/mach-netx/pfifo.c b/arch/arm/mach-netx/pfifo.c deleted file mode 100644 index 2e5cc777329f..000000000000 --- a/arch/arm/mach-netx/pfifo.c +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only 
-/* - * arch/arm/mach-netx/pfifo.c - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/io.h> - -#include <mach/hardware.h> -#include <mach/netx-regs.h> -#include <mach/pfifo.h> - -static DEFINE_MUTEX(pfifo_lock); - -static unsigned int pfifo_used = 0; - -int pfifo_request(unsigned int pfifo_mask) -{ - int err = 0; - unsigned int val; - - mutex_lock(&pfifo_lock); - - if (pfifo_mask & pfifo_used) { - err = -EBUSY; - goto out; - } - - pfifo_used |= pfifo_mask; - - val = readl(NETX_PFIFO_RESET); - writel(val | pfifo_mask, NETX_PFIFO_RESET); - writel(val, NETX_PFIFO_RESET); - -out: - mutex_unlock(&pfifo_lock); - return err; -} - -void pfifo_free(unsigned int pfifo_mask) -{ - mutex_lock(&pfifo_lock); - pfifo_used &= ~pfifo_mask; - mutex_unlock(&pfifo_lock); -} - -EXPORT_SYMBOL(pfifo_push); -EXPORT_SYMBOL(pfifo_pop); -EXPORT_SYMBOL(pfifo_fill_level); -EXPORT_SYMBOL(pfifo_empty); -EXPORT_SYMBOL(pfifo_request); -EXPORT_SYMBOL(pfifo_free); diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c deleted file mode 100644 index d9defa1ab605..000000000000 --- a/arch/arm/mach-netx/time.c +++ /dev/null @@ -1,141 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mach-netx/time.c - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/clocksource.h> -#include <linux/clockchips.h> -#include <linux/io.h> - -#include <mach/hardware.h> -#include <asm/mach/time.h> -#include <mach/netx-regs.h> - -#define NETX_CLOCK_FREQ 100000000 -#define NETX_LATCH DIV_ROUND_CLOSEST(NETX_CLOCK_FREQ, HZ) - -#define TIMER_CLOCKEVENT 0 -#define TIMER_CLOCKSOURCE 1 - -static inline void timer_shutdown(struct clock_event_device *evt) -{ - /* disable timer */ - writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT)); -} - -static int netx_shutdown(struct clock_event_device *evt) -{ - timer_shutdown(evt); - - return 0; -} - -static int netx_set_oneshot(struct clock_event_device *evt) -{ - u32 tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN; - - timer_shutdown(evt); - writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT)); - writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT)); - - return 0; -} - -static int netx_set_periodic(struct clock_event_device *evt) -{ - u32 tmode = NETX_GPIO_COUNTER_CTRL_RST_EN | - NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN; - - timer_shutdown(evt); - writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT)); - writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT)); - - return 0; -} - -static int netx_set_next_event(unsigned long evt, - struct clock_event_device *clk) -{ - writel(0 - evt, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKEVENT)); - return 0; -} - -static struct clock_event_device netx_clockevent = { - .name = "netx-timer" __stringify(TIMER_CLOCKEVENT), - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = netx_set_next_event, - .set_state_shutdown = netx_shutdown, - .set_state_periodic = netx_set_periodic, - .set_state_oneshot = netx_set_oneshot, - .tick_resume = netx_shutdown, -}; - -/* - * IRQ handler for the timer - */ -static irqreturn_t -netx_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = &netx_clockevent; - - /* acknowledge interrupt */ - writel(COUNTER_BIT(0), NETX_GPIO_IRQ); - - evt->event_handler(evt); - - return IRQ_HANDLED; -} - 
-static struct irqaction netx_timer_irq = { - .name = "NetX Timer Tick", - .flags = IRQF_TIMER | IRQF_IRQPOLL, - .handler = netx_timer_interrupt, -}; - -/* - * Set up timer interrupt - */ -void __init netx_timer_init(void) -{ - /* disable timer initially */ - writel(0, NETX_GPIO_COUNTER_CTRL(0)); - - /* Reset the timer value to zero */ - writel(0, NETX_GPIO_COUNTER_CURRENT(0)); - - writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(0)); - - /* acknowledge interrupt */ - writel(COUNTER_BIT(0), NETX_GPIO_IRQ); - - /* Enable the interrupt in the specific timer - * register and start timer - */ - writel(COUNTER_BIT(0), NETX_GPIO_IRQ_ENABLE); - writel(NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN, - NETX_GPIO_COUNTER_CTRL(0)); - - setup_irq(NETX_IRQ_TIMER0, &netx_timer_irq); - - /* Setup timer one for clocksource */ - writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE)); - writel(0, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE)); - writel(0xffffffff, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKSOURCE)); - - writel(NETX_GPIO_COUNTER_CTRL_RUN, - NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE)); - - clocksource_mmio_init(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE), - "netx_timer", NETX_CLOCK_FREQ, 200, 32, clocksource_mmio_readl_up); - - /* with max_delta_ns >= delta2ns(0x800) the system currently runs fine. - * Adding some safety ... */ - netx_clockevent.cpumask = cpumask_of(0); - clockevents_config_and_register(&netx_clockevent, NETX_CLOCK_FREQ, - 0xa00, 0xfffffffe); -} diff --git a/arch/arm/mach-netx/xc.c b/arch/arm/mach-netx/xc.c deleted file mode 100644 index 885a618b2651..000000000000 --- a/arch/arm/mach-netx/xc.c +++ /dev/null @@ -1,246 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mach-netx/xc.c - * - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#include <linux/init.h> -#include <linux/device.h> -#include <linux/firmware.h> -#include <linux/mutex.h> -#include <linux/slab.h> -#include <linux/io.h> -#include <linux/export.h> - -#include <mach/hardware.h> -#include <mach/irqs.h> -#include <mach/netx-regs.h> - -#include <mach/xc.h> - -static DEFINE_MUTEX(xc_lock); - -static int xc_in_use = 0; - -struct fw_desc { - unsigned int ofs; - unsigned int size; - unsigned int patch_ofs; - unsigned int patch_entries; -}; - -struct fw_header { - unsigned int magic; - unsigned int type; - unsigned int version; - unsigned int reserved[5]; - struct fw_desc fw_desc[3]; -} __attribute__ ((packed)); - -int xc_stop(struct xc *x) -{ - writel(RPU_HOLD_PC, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS); - writel(TPU_HOLD_PC, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS); - writel(XPU_HOLD_PC, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS); - return 0; -} - -int xc_start(struct xc *x) -{ - writel(0, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS); - writel(0, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS); - writel(0, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS); - return 0; -} - -int xc_running(struct xc *x) -{ - return (readl(x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS) & RPU_HOLD_PC) - || (readl(x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS) & TPU_HOLD_PC) - || (readl(x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS) & XPU_HOLD_PC) ? 
- 0 : 1; -} - -int xc_reset(struct xc *x) -{ - writel(0, x->xpec_base + NETX_XPEC_PC_OFS); - return 0; -} - -static int xc_check_ptr(struct xc *x, unsigned long adr, unsigned int size) -{ - if (adr >= NETX_PA_XMAC(x->no) && - adr + size < NETX_PA_XMAC(x->no) + XMAC_MEM_SIZE) - return 0; - - if (adr >= NETX_PA_XPEC(x->no) && - adr + size < NETX_PA_XPEC(x->no) + XPEC_MEM_SIZE) - return 0; - - dev_err(x->dev, "Illegal pointer in firmware found. aborting\n"); - - return -1; -} - -static int xc_patch(struct xc *x, const void *patch, int count) -{ - unsigned int val, adr; - const unsigned int *data = patch; - - int i; - for (i = 0; i < count; i++) { - adr = *data++; - val = *data++; - if (xc_check_ptr(x, adr, 4) < 0) - return -EINVAL; - - writel(val, (void __iomem *)io_p2v(adr)); - } - return 0; -} - -int xc_request_firmware(struct xc *x) -{ - int ret; - char name[16]; - const struct firmware *fw; - struct fw_header *head; - unsigned int size; - int i; - const void *src; - unsigned long dst; - - sprintf(name, "xc%d.bin", x->no); - - ret = request_firmware(&fw, name, x->dev); - - if (ret < 0) { - dev_err(x->dev, "request_firmware failed\n"); - return ret; - } - - head = (struct fw_header *)fw->data; - if (head->magic != 0x4e657458) { - if (head->magic == 0x5874654e) { - dev_err(x->dev, - "firmware magic is 'XteN'. Endianness problems?\n"); - ret = -ENODEV; - goto exit_release_firmware; - } - dev_err(x->dev, "unrecognized firmware magic 0x%08x\n", - head->magic); - ret = -ENODEV; - goto exit_release_firmware; - } - - x->type = head->type; - x->version = head->version; - - ret = -EINVAL; - - for (i = 0; i < 3; i++) { - src = fw->data + head->fw_desc[i].ofs; - dst = *(unsigned int *)src; - src += sizeof (unsigned int); - size = head->fw_desc[i].size - sizeof (unsigned int); - - if (xc_check_ptr(x, dst, size)) - goto exit_release_firmware; - - memcpy((void *)io_p2v(dst), src, size); - - src = fw->data + head->fw_desc[i].patch_ofs; - size = head->fw_desc[i].patch_entries; - ret = xc_patch(x, src, size); - if (ret < 0) - goto exit_release_firmware; - } - - ret = 0; - - exit_release_firmware: - release_firmware(fw); - - return ret; -} - -struct xc *request_xc(int xcno, struct device *dev) -{ - struct xc *x = NULL; - - mutex_lock(&xc_lock); - - if (xcno > 3) - goto exit; - if (xc_in_use & (1 << xcno)) - goto exit; - - x = kmalloc(sizeof (struct xc), GFP_KERNEL); - if (!x) - goto exit; - - if (!request_mem_region - (NETX_PA_XPEC(xcno), XPEC_MEM_SIZE, kobject_name(&dev->kobj))) - goto exit_free; - - if (!request_mem_region - (NETX_PA_XMAC(xcno), XMAC_MEM_SIZE, kobject_name(&dev->kobj))) - goto exit_release_1; - - if (!request_mem_region - (SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE, kobject_name(&dev->kobj))) - goto exit_release_2; - - x->xpec_base = (void * __iomem)io_p2v(NETX_PA_XPEC(xcno)); - x->xmac_base = (void * __iomem)io_p2v(NETX_PA_XMAC(xcno)); - x->sram_base = ioremap(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE); - if (!x->sram_base) - goto exit_release_3; - - x->irq = NETX_IRQ_XPEC(xcno); - - x->no = xcno; - x->dev = dev; - - xc_in_use |= (1 << xcno); - - goto exit; - - exit_release_3: - release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE); - exit_release_2: - release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE); - exit_release_1: - release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE); - exit_free: - kfree(x); - x = NULL; - exit: - mutex_unlock(&xc_lock); - return x; -} - -void free_xc(struct xc *x) -{ - int xcno = x->no; - - mutex_lock(&xc_lock); - - iounmap(x->sram_base); - 
release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE); - release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE); - release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE); - xc_in_use &= ~(1 << x->no); - kfree(x); - - mutex_unlock(&xc_lock); -} - -EXPORT_SYMBOL(free_xc); -EXPORT_SYMBOL(request_xc); -EXPORT_SYMBOL(xc_request_firmware); -EXPORT_SYMBOL(xc_reset); -EXPORT_SYMBOL(xc_running); -EXPORT_SYMBOL(xc_start); -EXPORT_SYMBOL(xc_stop); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 820b60a50125..c54cd7ed90ba 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -663,6 +663,11 @@ config ARM_LPAE depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \ !CPU_32v4 && !CPU_32v3 select PHYS_ADDR_T_64BIT + select SWIOTLB + select ARCH_HAS_DMA_COHERENT_TO_PFN + select ARCH_HAS_DMA_MMAP_PGPROT + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_SYNC_DMA_FOR_CPU help Say Y if you have an ARMv7 processor supporting the LPAE page table format and you would like to access memory beyond the diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 4789c60a86e3..6774b03aa405 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/device.h> #include <linux/dma-mapping.h> +#include <linux/dma-noncoherent.h> #include <linux/dma-contiguous.h> #include <linux/highmem.h> #include <linux/memblock.h> @@ -1125,6 +1126,19 @@ int arm_dma_supported(struct device *dev, u64 mask) static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) { + /* + * When CONFIG_ARM_LPAE is set, physical address can extend above + * 32-bits, which then can't be addressed by devices that only support + * 32-bit DMA. + * Use the generic dma-direct / swiotlb ops code in that case, as that + * handles bounce buffering for us. + * + * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the + * latter is also selected by the Xen code, but that code for now relies + * on non-NULL dev_dma_ops. To be cleaned up later. + */ + if (IS_ENABLED(CONFIG_ARM_LPAE)) + return NULL; return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; } @@ -2329,6 +2343,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct dma_map_ops *dma_ops; dev->archdata.dma_coherent = coherent; +#ifdef CONFIG_SWIOTLB + dev->dma_coherent = coherent; +#endif /* * Don't override the dma_ops if they have already been set. 
Ideally @@ -2363,3 +2380,47 @@ void arch_teardown_dma_ops(struct device *dev) /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ set_dma_ops(dev, NULL); } + +#ifdef CONFIG_SWIOTLB +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), + size, dir); +} + +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), + size, dir); +} + +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) +{ + return dma_to_pfn(dev, dma_addr); +} + +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, + unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev)) + return __get_dma_pgprot(attrs, prot); + return prot; +} + +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) +{ + return __dma_alloc(dev, size, dma_handle, gfp, + __get_dma_pgprot(attrs, PAGE_KERNEL), false, + attrs, __builtin_return_address(0)); +} + +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle, unsigned long attrs) +{ + __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false); +} +#endif /* CONFIG_SWIOTLB */ diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4920a206dce9..16d373d587c4 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -21,6 +21,7 @@ #include <linux/dma-contiguous.h> #include <linux/sizes.h> #include <linux/stop_machine.h> +#include <linux/swiotlb.h> #include <asm/cp15.h> #include <asm/mach-types.h> @@ -463,6 +464,10 @@ static void __init free_highpages(void) */ void __init mem_init(void) { +#ifdef CONFIG_ARM_LPAE + swiotlb_init(1); +#endif + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); /* this will put all unused low memory onto the freelists */ diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index bb1f1dbb34e8..61de992bbea3 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -52,7 +52,7 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y) ifeq ($(CONFIG_CC_IS_CLANG), y) $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built) - else ifeq ($(CROSS_COMPILE_COMPAT),) + else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),) $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built) else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),) $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT) diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h index e25f7fcd7997..cffa8991880d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -462,7 +462,7 @@ #define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0 #define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0 #define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0 -#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK 0x1D0 0x438 0x000 0x1 0x0 #define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2 #define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0 #define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0 @@ -472,7 +472,7 @@ #define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0 #define 
MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0 #define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0 -#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2 0x1D8 0x440 0x000 0x1 0x0 #define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2 #define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0 #define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index d09b808eff87..52aae341d0da 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -715,8 +715,7 @@ sai2: sai@308b0000 { #sound-dai-cells = <0>; - compatible = "fsl,imx8mq-sai", - "fsl,imx6sx-sai"; + compatible = "fsl,imx8mq-sai"; reg = <0x308b0000 0x10000>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MQ_CLK_SAI2_IPG>, diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 79155a8cfe7c..89e4c8b79349 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void) BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF | GIC_PRIO_PSR_I_SET)); BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON); + /* + * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared + * and non-secure PMR accesses are not subject to the shifts that + * are applied to IRQ priorities + */ + BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON); gic_write_pmr(GIC_PRIO_IRQOFF); } diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 407e2bf23676..c96ffa4722d3 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -35,9 +35,10 @@ */ enum ftr_type { - FTR_EXACT, /* Use a predefined safe value */ - FTR_LOWER_SAFE, /* Smaller value is safe */ - FTR_HIGHER_SAFE,/* Bigger value is safe */ + FTR_EXACT, /* Use a predefined safe value */ + FTR_LOWER_SAFE, /* Smaller value is safe */ + FTR_HIGHER_SAFE, /* Bigger value is safe */ + FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */ }; #define FTR_STRICT true /* SANITY check strict matching required */ diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 987926ed535e..063c964af705 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -13,6 +13,8 @@ #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT #define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT) +#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) + /* mask/save/unmask/restore all exceptions, including interrupts. */ static inline void local_daif_mask(void) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 8e79ce9c3f5c..76a144702586 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) #define alloc_screen_info(x...) &screen_info -#define free_screen_info(x...) 
+ +static inline void free_screen_info(efi_system_table_t *sys_table_arg, + struct screen_info *si) +{ +} /* redeclare as 'hidden' so the compiler will generate relative references */ extern struct screen_info screen_info __attribute__((__visibility__("hidden"))); diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 3c7037c6ba9b..b618017205a3 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -202,7 +202,7 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; ({ \ set_thread_flag(TIF_32BIT); \ }) -#ifdef CONFIG_GENERIC_COMPAT_VDSO +#ifdef CONFIG_COMPAT_VDSO #define COMPAT_ARCH_DLINFO \ do { \ /* \ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b7ba75809751..fb04f10a78ab 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -210,7 +210,11 @@ extern u64 vabits_user; #define __tag_reset(addr) untagged_addr(addr) #define __tag_get(addr) (__u8)((u64)(addr) >> 56) #else -#define __tag_set(addr, tag) (addr) +static inline const void *__tag_set(const void *addr, u8 tag) +{ + return addr; +} + #define __tag_reset(addr) (addr) #define __tag_get(addr) 0 #endif @@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x) #define page_to_virt(page) ({ \ unsigned long __addr = \ ((__page_to_voff(page)) | PAGE_OFFSET); \ - unsigned long __addr_tag = \ - __tag_set(__addr, page_kasan_tag(page)); \ + const void *__addr_tag = \ + __tag_set((void *)__addr, page_kasan_tag(page)); \ ((void *)__addr_tag); \ }) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 87a4b2ddc1a1..5fdcfe237338 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -301,7 +301,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b) /* * Huge pte definitions. 
*/ -#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT)) #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) /* @@ -448,8 +447,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, PMD_TYPE_SECT) #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 -#define pud_sect(pud) (0) -#define pud_table(pud) (1) +static inline bool pud_sect(pud_t pud) { return false; } +static inline bool pud_table(pud_t pud) { return true; } #else #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ PUD_TYPE_SECT) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index fd5b1a4efc70..844e2964b0f5 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) regs->pmr_save = GIC_PRIO_IRQON; } +static inline void set_ssbs_bit(struct pt_regs *regs) +{ + regs->pstate |= PSR_SSBS_BIT; +} + +static inline void set_compat_ssbs_bit(struct pt_regs *regs) +{ + regs->pstate |= PSR_AA32_SSBS_BIT; +} + static inline void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { @@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc, regs->pstate = PSR_MODE_EL0t; if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) - regs->pstate |= PSR_SSBS_BIT; + set_ssbs_bit(regs); regs->sp = sp; } @@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, #endif if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) - regs->pstate |= PSR_AA32_SSBS_BIT; + set_compat_ssbs_bit(regs); regs->compat_sp = sp; } diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index b1dd039023ef..1dcf63a9ac1f 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -30,7 +30,7 @@ * in the the priority mask, it indicates that PSR.I should be set and * interrupt disabling temporarily does not rely on IRQ priorities. */ -#define GIC_PRIO_IRQON 0xc0 +#define GIC_PRIO_IRQON 0xe0 #define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) #define GIC_PRIO_PSR_I_SET (1 << 4) diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index df45af931459..4d9b1f48dc39 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -8,19 +8,12 @@ #include <linux/percpu.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> +#include <linux/types.h> #include <asm/memory.h> #include <asm/ptrace.h> #include <asm/sdei.h> -struct stackframe { - unsigned long fp; - unsigned long pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - int graph; -#endif -}; - enum stack_type { STACK_TYPE_UNKNOWN, STACK_TYPE_TASK, @@ -28,6 +21,7 @@ enum stack_type { STACK_TYPE_OVERFLOW, STACK_TYPE_SDEI_NORMAL, STACK_TYPE_SDEI_CRITICAL, + __NR_STACK_TYPES }; struct stack_info { @@ -36,6 +30,37 @@ struct stack_info { enum stack_type type; }; +/* + * A snapshot of a frame record or fp/lr register values, along with some + * accounting information necessary for robust unwinding. + * + * @fp: The fp value in the frame record (or the real fp) + * @pc: The fp value in the frame record (or the real lr) + * + * @stacks_done: Stacks which have been entirely unwound, for which it is no + * longer valid to unwind to. + * + * @prev_fp: The fp that pointed to this frame record, or a synthetic value + * of 0. 
This is used to ensure that within a stack, each + * subsequent frame record is at an increasing address. + * @prev_type: The type of stack this frame record was on, or a synthetic + * value of STACK_TYPE_UNKNOWN. This is used to detect a + * transition from one stack to another. + * + * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a + * replacement lr value in the ftrace graph stack. + */ +struct stackframe { + unsigned long fp; + unsigned long pc; + DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); + unsigned long prev_fp; + enum stack_type prev_type; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + int graph; +#endif +}; + extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data); @@ -64,8 +89,9 @@ static inline bool on_irq_stack(unsigned long sp, return true; } -static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, - struct stack_info *info) +static inline bool on_task_stack(const struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) { unsigned long low = (unsigned long)task_stack_page(tsk); unsigned long high = low + THREAD_SIZE; @@ -112,10 +138,13 @@ static inline bool on_overflow_stack(unsigned long sp, * We can only safely access per-cpu stacks from current in a non-preemptible * context. */ -static inline bool on_accessible_stack(struct task_struct *tsk, - unsigned long sp, - struct stack_info *info) +static inline bool on_accessible_stack(const struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) { + if (info) + info->type = STACK_TYPE_UNKNOWN; + if (on_task_stack(tsk, sp, info)) return true; if (tsk != current || preemptible()) @@ -130,4 +159,27 @@ static inline bool on_accessible_stack(struct task_struct *tsk, return false; } +static inline void start_backtrace(struct stackframe *frame, + unsigned long fp, unsigned long pc) +{ + frame->fp = fp; + frame->pc = pc; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + frame->graph = 0; +#endif + + /* + * Prime the first unwind. + * + * In unwind_frame() we'll check that the FP points to a valid stack, + * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be + * treated as a transition to whichever stack that happens to be. The + * prev_fp value won't be used, but we set it to 0 such that it is + * definitely not an accessible stack address. 
+ */ + bitmap_zero(frame->stacks_done, __NR_STACK_TYPES); + frame->prev_fp = 0; + frame->prev_type = STACK_TYPE_UNKNOWN; +} + #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index f4812777f5c5..c50ee1b7d5cd 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -16,6 +16,8 @@ #define VDSO_HAS_CLOCK_GETRES 1 +#define VDSO_HAS_32BIT_FALLBACK 1 + static __always_inline int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -52,6 +54,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) } static __always_inline +long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + register struct old_timespec32 *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_compat_clock_gettime; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) { register struct __kernel_timespec *ts asm("r1") = _ts; @@ -72,6 +91,27 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) return ret; } +static __always_inline +int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + register struct old_timespec32 *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_compat_clock_getres; + + /* The checks below are required for ABI consistency with arm */ + if ((_clkid >= MAX_CLOCKS) && (_ts == NULL)) + return -EINVAL; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) { u64 res; diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h index b551b741653d..5e1e648aeec4 100644 --- a/arch/arm64/include/uapi/asm/bpf_perf_event.h +++ b/arch/arm64/include/uapi/asm/bpf_perf_event.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ #define _UAPI__ASM_BPF_PERF_EVENT_H__ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f29f36a65175..d19d14ba9ae4 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1), /* * Linux can handle differing I-cache policies. 
Userspace JITs will @@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, case FTR_LOWER_SAFE: ret = new < cur ? new : cur; break; + case FTR_HIGHER_OR_ZERO_SAFE: + if (!cur || !new) + break; + /* Fallthrough */ case FTR_HIGHER_SAFE: ret = new > cur ? new : cur; break; diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index f8719bd30850..48222a4760c2 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -207,16 +207,16 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr) list = user_mode(regs) ? &user_step_hook : &kernel_step_hook; - rcu_read_lock(); - + /* + * Since single-step exception disables interrupt, this function is + * entirely not preemptible, and we can use rcu list safely here. + */ list_for_each_entry_rcu(hook, list, node) { retval = hook->fn(regs, esr); if (retval == DBG_HOOK_HANDLED) break; } - rcu_read_unlock(); - return retval; } NOKPROBE_SYMBOL(call_step_hook); @@ -305,14 +305,16 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; - rcu_read_lock(); + /* + * Since brk exception disables interrupt, this function is + * entirely not preemptible, and we can use rcu list safely here. + */ list_for_each_entry_rcu(hook, list, node) { unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; if ((comment & ~hook->mask) == hook->imm) fn = hook->fn; } - rcu_read_unlock(); return fn ? fn(regs, esr) : DBG_HOOK_ERROR; } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9cdc4592da3e..320a30dbe35e 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -586,10 +586,8 @@ el1_sync: b.eq el1_ia cmp x24, #ESR_ELx_EC_SYS64 // configurable trap b.eq el1_undef - cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception - b.eq el1_sp_pc cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el1_sp_pc + b.eq el1_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1 b.eq el1_undef cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 @@ -611,9 +609,11 @@ el1_da: bl do_mem_abort kernel_exit 1 -el1_sp_pc: +el1_pc: /* - * Stack or PC alignment exception handling + * PC alignment exception handling. We don't handle SP alignment faults, + * since we will have hit a recursive exception when trying to push the + * initial pt_regs. 
*/ mrs x0, far_el1 inherit_daif pstate=x23, tmp=x2 @@ -732,9 +732,9 @@ el0_sync: ccmp x24, #ESR_ELx_EC_WFx, #4, ne b.eq el0_sys cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception - b.eq el0_sp_pc + b.eq el0_sp cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el0_sp_pc + b.eq el0_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 @@ -758,7 +758,7 @@ el0_sync_compat: cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception b.eq el0_fpsimd_exc cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el0_sp_pc + b.eq el0_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap @@ -858,11 +858,15 @@ el0_fpsimd_exc: mov x1, sp bl do_fpsimd_exc b ret_to_user +el0_sp: + ldr x26, [sp, #S_SP] + b el0_sp_pc +el0_pc: + mrs x26, far_el1 el0_sp_pc: /* * Stack or PC alignment exception handling */ - mrs x26, far_el1 gic_prio_kentry_setup tmp=x0 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index eec4776ae5f0..37d3912cfe06 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -406,6 +406,18 @@ static __uint128_t arm64_cpu_to_le128(__uint128_t x) #define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x) +static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst, + unsigned int vq) +{ + unsigned int i; + __uint128_t *p; + + for (i = 0; i < SVE_NUM_ZREGS; ++i) { + p = (__uint128_t *)ZREG(sst, vq, i); + *p = arm64_cpu_to_le128(fst->vregs[i]); + } +} + /* * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to * task->thread.sve_state. @@ -423,17 +435,12 @@ static void fpsimd_to_sve(struct task_struct *task) unsigned int vq; void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; - unsigned int i; - __uint128_t *p; if (!system_supports_sve()) return; vq = sve_vq_from_vl(task->thread.sve_vl); - for (i = 0; i < 32; ++i) { - p = (__uint128_t *)ZREG(sst, vq, i); - *p = arm64_cpu_to_le128(fst->vregs[i]); - } + __fpsimd_to_sve(sst, fst, vq); } /* @@ -459,7 +466,7 @@ static void sve_to_fpsimd(struct task_struct *task) return; vq = sve_vq_from_vl(task->thread.sve_vl); - for (i = 0; i < 32; ++i) { + for (i = 0; i < SVE_NUM_ZREGS; ++i) { p = (__uint128_t const *)ZREG(sst, vq, i); fst->vregs[i] = arm64_le128_to_cpu(*p); } @@ -550,8 +557,6 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) unsigned int vq; void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; - unsigned int i; - __uint128_t *p; if (!test_tsk_thread_flag(task, TIF_SVE)) return; @@ -559,11 +564,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) vq = sve_vq_from_vl(task->thread.sve_vl); memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); - - for (i = 0; i < 32; ++i) { - p = (__uint128_t *)ZREG(sst, vq, i); - *p = arm64_cpu_to_le128(fst->vregs[i]); - } + __fpsimd_to_sve(sst, fst, vq); } int sve_set_vector_length(struct task_struct *task, diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index dceb84520948..38ee1514cd9c 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -536,13 +536,18 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, /* Aligned */ break; case 1: - /* Allow single byte watchpoint. 
*/ - if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) - break; case 2: /* Allow halfword watchpoints and breakpoints. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; + + /* Fallthrough */ + case 3: + /* Allow single byte watchpoint. */ + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; + + /* Fallthrough */ default: return -EINVAL; } diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 46e643e30708..03ff15bffbb6 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -314,18 +314,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, /* MOVW instruction relocations. */ case R_AARCH64_MOVW_UABS_G0_NC: overflow_check = false; + /* Fall through */ case R_AARCH64_MOVW_UABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, AARCH64_INSN_IMM_MOVKZ); break; case R_AARCH64_MOVW_UABS_G1_NC: overflow_check = false; + /* Fall through */ case R_AARCH64_MOVW_UABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, AARCH64_INSN_IMM_MOVKZ); break; case R_AARCH64_MOVW_UABS_G2_NC: overflow_check = false; + /* Fall through */ case R_AARCH64_MOVW_UABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, AARCH64_INSN_IMM_MOVKZ); @@ -393,6 +396,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, break; case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; + /* Fall through */ case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_adrp(me, sechdrs, loc, val); if (ovf && ovf != -ERANGE) diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index 9d63514b9836..b0e03e052dd1 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -154,12 +154,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, return; } - frame.fp = regs->regs[29]; - frame.pc = regs->pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = 0; -#endif - + start_backtrace(&frame, regs->regs[29], regs->pc); walk_stackframe(current, &frame, callchain_trace, entry); } diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index bd5dfffca272..c4452827419b 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -21,6 +21,7 @@ #include <asm/ptrace.h> #include <asm/cacheflush.h> #include <asm/debug-monitors.h> +#include <asm/daifflags.h> #include <asm/system_misc.h> #include <asm/insn.h> #include <linux/uaccess.h> @@ -168,33 +169,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p) } /* - * When PSTATE.D is set (masked), then software step exceptions can not be - * generated. - * SPSR's D bit shows the value of PSTATE.D immediately before the - * exception was taken. PSTATE.D is set while entering into any exception - * mode, however software clears it for any normal (none-debug-exception) - * mode in the exception entry. Therefore, when we are entering into kprobe - * breakpoint handler from any normal mode then SPSR.D bit is already - * cleared, however it is set when we are entering from any debug exception - * mode. - * Since we always need to generate single step exception after a kprobe - * breakpoint exception therefore we need to clear it unconditionally, when - * we become sure that the current breakpoint exception is for kprobe. 
- */ -static void __kprobes -spsr_set_debug_flag(struct pt_regs *regs, int mask) -{ - unsigned long spsr = regs->pstate; - - if (mask) - spsr |= PSR_D_BIT; - else - spsr &= ~PSR_D_BIT; - - regs->pstate = spsr; -} - -/* * Interrupts need to be disabled before single-step mode is set, and not * reenabled until after single-step mode ends. * Without disabling interrupt on local CPU, there is a chance of @@ -205,17 +179,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask) static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - kcb->saved_irqflag = regs->pstate; + kcb->saved_irqflag = regs->pstate & DAIF_MASK; regs->pstate |= PSR_I_BIT; + /* Unmask PSTATE.D for enabling software step exceptions. */ + regs->pstate &= ~PSR_D_BIT; } static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - if (kcb->saved_irqflag & PSR_I_BIT) - regs->pstate |= PSR_I_BIT; - else - regs->pstate &= ~PSR_I_BIT; + regs->pstate &= ~DAIF_MASK; + regs->pstate |= kcb->saved_irqflag; } static void __kprobes @@ -252,8 +226,6 @@ static void __kprobes setup_singlestep(struct kprobe *p, set_ss_context(kcb, slot); /* mark pending ss */ - spsr_set_debug_flag(regs, 0); - /* IRQs and single stepping do not mix well. */ kprobes_save_local_irqflag(kcb, regs); kernel_enable_single_step(regs); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6a869d9f304f..f674f28df663 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, childregs->pstate |= PSR_UAO_BIT; if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) - childregs->pstate |= PSR_SSBS_BIT; + set_ssbs_bit(childregs); if (system_uses_irq_prio_masking()) childregs->pmr_save = GIC_PRIO_IRQON; @@ -443,6 +443,32 @@ void uao_thread_switch(struct task_struct *next) } /* + * Force SSBS state on context-switch, since it may be lost after migrating + * from a CPU which treats the bit as RES0 in a heterogeneous system. + */ +static void ssbs_thread_switch(struct task_struct *next) +{ + struct pt_regs *regs = task_pt_regs(next); + + /* + * Nothing to do for kernel threads, but 'regs' may be junk + * (e.g. idle task) so check the flags and bail early. + */ + if (unlikely(next->flags & PF_KTHREAD)) + return; + + /* If the mitigation is enabled, then we leave SSBS clear. */ + if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) || + test_tsk_thread_flag(next, TIF_SSBD)) + return; + + if (compat_user_mode(regs)) + set_compat_ssbs_bit(regs); + else if (user_mode(regs)) + set_ssbs_bit(regs); +} + +/* * We store our current task in sp_el0, which is clobbered by userspace. Keep a * shadow copy so that we can restore this upon entry from userspace. 
* @@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, entry_task_switch(next); uao_thread_switch(next); ptrauth_thread_switch(next); + ssbs_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -498,11 +525,8 @@ unsigned long get_wchan(struct task_struct *p) if (!stack_page) return 0; - frame.fp = thread_saved_fp(p); - frame.pc = thread_saved_pc(p); -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = 0; -#endif + start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p)); + do { if (unwind_frame(p, &frame)) goto out; diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index b21cba90f82d..a5e8b3b9d798 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -8,6 +8,7 @@ #include <linux/export.h> #include <linux/ftrace.h> +#include <linux/kprobes.h> #include <asm/stack_pointer.h> #include <asm/stacktrace.h> @@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d) return 0; } } +NOKPROBE_SYMBOL(save_return_addr); void *return_address(unsigned int level) { @@ -38,12 +40,9 @@ void *return_address(unsigned int level) data.level = level + 2; data.addr = NULL; - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)return_address; /* dummy */ -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = 0; -#endif - + start_backtrace(&frame, + (unsigned long)__builtin_frame_address(0), + (unsigned long)return_address); walk_stackframe(current, &frame, save_return_addr, &data); if (!data.level) @@ -52,3 +51,4 @@ void *return_address(unsigned int level) return NULL; } EXPORT_SYMBOL_GPL(return_address); +NOKPROBE_SYMBOL(return_address); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index ea90d3bd9253..018a33e01b0e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -152,8 +152,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_crit("CPU%u: died during early boot\n", cpu); break; } - /* Fall through */ pr_crit("CPU%u: may not have shut down cleanly\n", cpu); + /* Fall through */ case CPU_STUCK_IN_KERNEL: pr_crit("CPU%u: is stuck in kernel\n", cpu); if (status & CPU_STUCK_REASON_52_BIT_VA) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 62d395151abe..a336cb124320 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/ftrace.h> +#include <linux/kprobes.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> @@ -29,9 +30,18 @@ * ldp x29, x30, [sp] * add sp, sp, #0x10 */ + +/* + * Unwind from one frame record (A) to the next frame record (B). + * + * We terminate early if the location of B indicates a malformed chain of frame + * records (e.g. a cycle), determined based on the location and fp value of A + * and the location (but not the fp value) of B. 
+ */ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) { unsigned long fp = frame->fp; + struct stack_info info; if (fp & 0xf) return -EINVAL; @@ -39,11 +49,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) if (!tsk) tsk = current; - if (!on_accessible_stack(tsk, fp, NULL)) + if (!on_accessible_stack(tsk, fp, &info)) + return -EINVAL; + + if (test_bit(info.type, frame->stacks_done)) return -EINVAL; + /* + * As stacks grow downward, any valid record on the same stack must be + * at a strictly higher address than the prior record. + * + * Stacks can nest in several valid orders, e.g. + * + * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL + * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW + * + * ... but the nesting itself is strict. Once we transition from one + * stack to another, it's never valid to unwind back to that first + * stack. + */ + if (info.type == frame->prev_type) { + if (fp <= frame->prev_fp) + return -EINVAL; + } else { + set_bit(frame->prev_type, frame->stacks_done); + } + + /* + * Record this frame record's values and location. The prev_fp and + * prev_type are only meaningful to the next unwind_frame() invocation. + */ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + frame->prev_fp = fp; + frame->prev_type = info.type; #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && @@ -73,6 +112,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) return 0; } +NOKPROBE_SYMBOL(unwind_frame); void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data) @@ -87,6 +127,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, break; } } +NOKPROBE_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE struct stack_trace_data { @@ -122,12 +163,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) data.skip = trace->skip; data.no_sched_functions = 0; - frame.fp = regs->regs[29]; - frame.pc = regs->pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = 0; -#endif - + start_backtrace(&frame, regs->regs[29], regs->pc); walk_stackframe(current, &frame, save_trace, &data); } EXPORT_SYMBOL_GPL(save_stack_trace_regs); @@ -146,17 +182,15 @@ static noinline void __save_stack_trace(struct task_struct *tsk, data.no_sched_functions = nosched; if (tsk != current) { - frame.fp = thread_saved_fp(tsk); - frame.pc = thread_saved_pc(tsk); + start_backtrace(&frame, thread_saved_fp(tsk), + thread_saved_pc(tsk)); } else { /* We don't want this function nor the caller */ data.skip += 2; - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)__save_stack_trace; + start_backtrace(&frame, + (unsigned long)__builtin_frame_address(0), + (unsigned long)__save_stack_trace); } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = 0; -#endif walk_stackframe(tsk, &frame, save_trace, &data); diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index 9f25aedeac9d..0b2946414dc9 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -38,11 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs) if (!in_lock_functions(regs->pc)) return regs->pc; - frame.fp = regs->regs[29]; - frame.pc = regs->pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = 0; -#endif + start_backtrace(&frame, regs->regs[29], regs->pc); + do { int ret = unwind_frame(NULL, &frame); if (ret < 0) diff --git 
a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8c03456dade6..d3313797cca9 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -100,18 +100,17 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) return; if (tsk == current) { - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)dump_backtrace; + start_backtrace(&frame, + (unsigned long)__builtin_frame_address(0), + (unsigned long)dump_backtrace); } else { /* * task blocked in __switch_to */ - frame.fp = thread_saved_fp(tsk); - frame.pc = thread_saved_pc(tsk); + start_backtrace(&frame, + thread_saved_fp(tsk), + thread_saved_pc(tsk)); } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = 0; -#endif printk("Call trace:\n"); do { diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 4ab863045188..dd2514bb1511 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -32,10 +32,10 @@ UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n -ifeq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -else -CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -include $(c-gettimeofday-y) + +ifneq ($(c-gettimeofday-y),) + CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) endif # Clang versions less than 8 do not support -mcmodel=tiny @@ -57,8 +57,7 @@ $(obj)/vdso.o : $(obj)/vdso.so # Link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,ld) - $(call if_changed,vdso_check) + $(call if_changed,vdsold_and_vdso_check) # Strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -74,8 +73,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE $(call if_changed,vdsosym) # Actual build commands -quiet_cmd_vdsocc = VDSOCC $@ - cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $< +quiet_cmd_vdsold_and_vdso_check = LD $@ + cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check) # Install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 60a4c6239712..1fba0776ed40 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -144,8 +144,7 @@ $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE # Link rule for the .so file, .lds has to be first $(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold) - $(call if_changed,vdso_check) + $(call if_changed,vdsold_and_vdso_check) # Compilation rules for the vDSO sources $(c-obj-vdso): %.o: %.c FORCE @@ -156,14 +155,17 @@ $(asm-obj-vdso): %.o: %.S FORCE $(call if_changed_dep,vdsoas) # Actual build commands -quiet_cmd_vdsold = VDSOL $@ +quiet_cmd_vdsold_and_vdso_check = LD32 $@ + cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) + +quiet_cmd_vdsold = LD32 $@ cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \ -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ -quiet_cmd_vdsocc = VDSOC $@ +quiet_cmd_vdsocc = CC32 $@ cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< -quiet_cmd_vdsocc_gettimeofday = VDSOC_GTD $@ +quiet_cmd_vdsocc_gettimeofday = CC32 $@ cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $< -quiet_cmd_vdsoas = VDSOA $@ +quiet_cmd_vdsoas = AS32 $@ cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $< quiet_cmd_vdsomunge = MUNGE $@ diff --git a/arch/arm64/mm/fault.c 
b/arch/arm64/mm/fault.c index 9568c116ac7f..cfd65b63f36f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -777,6 +777,53 @@ void __init hook_debug_fault_code(int nr, debug_fault_info[nr].name = name; } +/* + * In debug exception context, we explicitly disable preemption despite + * having interrupts disabled. + * This serves two purposes: it makes it much less likely that we would + * accidentally schedule in exception context and it will force a warning + * if we somehow manage to schedule by accident. + */ +static void debug_exception_enter(struct pt_regs *regs) +{ + /* + * Tell lockdep we disabled irqs in entry.S. Do nothing if they were + * already disabled to preserve the last enabled/disabled addresses. + */ + if (interrupts_enabled(regs)) + trace_hardirqs_off(); + + if (user_mode(regs)) { + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + } else { + /* + * We might have interrupted pretty much anything. In + * fact, if we're a debug exception, we can even interrupt + * NMI processing. We don't want this code makes in_nmi() + * to return true, but we need to notify RCU. + */ + rcu_nmi_enter(); + } + + preempt_disable(); + + /* This code is a bit fragile. Test it. */ + RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work"); +} +NOKPROBE_SYMBOL(debug_exception_enter); + +static void debug_exception_exit(struct pt_regs *regs) +{ + preempt_enable_no_resched(); + + if (!user_mode(regs)) + rcu_nmi_exit(); + + if (interrupts_enabled(regs)) + trace_hardirqs_on(); +} +NOKPROBE_SYMBOL(debug_exception_exit); + #ifdef CONFIG_ARM64_ERRATUM_1463225 DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); @@ -817,12 +864,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint, if (cortex_a76_erratum_1463225_debug_handler(regs)) return; - /* - * Tell lockdep we disabled irqs in entry.S. Do nothing if they were - * already disabled to preserve the last enabled/disabled addresses. - */ - if (interrupts_enabled(regs)) - trace_hardirqs_off(); + debug_exception_enter(regs); if (user_mode(regs) && !is_ttbr0_addr(pc)) arm64_apply_bp_hardening(); @@ -832,7 +874,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint, inf->sig, inf->code, (void __user *)pc, esr); } - if (interrupts_enabled(regs)) - trace_hardirqs_on(); + debug_exception_exit(regs); } NOKPROBE_SYMBOL(do_debug_exception); diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h index b079ec715cdf..d150cd664873 100644 --- a/arch/csky/include/uapi/asm/byteorder.h +++ b/arch/csky/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
#ifndef __ASM_CSKY_BYTEORDER_H diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h index ddf2f39aa925..ed7fad1ea20d 100644 --- a/arch/csky/include/uapi/asm/cachectl.h +++ b/arch/csky/include/uapi/asm/cachectl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __ASM_CSKY_CACHECTL_H #define __ASM_CSKY_CACHECTL_H diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h index ee323d818592..49d4e147a559 100644 --- a/arch/csky/include/uapi/asm/perf_regs.h +++ b/arch/csky/include/uapi/asm/perf_regs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _ASM_CSKY_PERF_REGS_H diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h index 4e248d5b86ef..66b2268e324e 100644 --- a/arch/csky/include/uapi/asm/ptrace.h +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _CSKY_PTRACE_H diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h index e81e7ff11e36..670c020f2cb8 100644 --- a/arch/csky/include/uapi/asm/sigcontext.h +++ b/arch/csky/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_SIGCONTEXT_H diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h index ec60e49cea66..211c983c7282 100644 --- a/arch/csky/include/uapi/asm/unistd.h +++ b/arch/csky/include/uapi/asm/unistd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #define __ARCH_WANT_SYS_CLONE diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h index 14b1931be69c..b65b169778e3 100644 --- a/arch/mips/vdso/vdso.h +++ b/arch/mips/vdso/vdso.h @@ -9,6 +9,7 @@ #if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT) /* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. 
*/ +#define BUILD_VDSO32_64 #undef CONFIG_64BIT #define CONFIG_32BIT 1 #ifndef __ASSEMBLY__ diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h index b5d58ea8decb..bc0b92ab8c15 100644 --- a/arch/nds32/include/uapi/asm/auxvec.h +++ b/arch/nds32/include/uapi/asm/auxvec.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __ASM_AUXVEC_H diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h index 511e653c709d..c264ef12c49c 100644 --- a/arch/nds32/include/uapi/asm/byteorder.h +++ b/arch/nds32/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __NDS32_BYTEORDER_H__ diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h index 73793662815c..31b9b439d819 100644 --- a/arch/nds32/include/uapi/asm/cachectl.h +++ b/arch/nds32/include/uapi/asm/cachectl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 1994, 1995, 1996 by Ralf Baechle // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef _ASM_CACHECTL diff --git a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h index d54a5d6c6538..f17396db16ec 100644 --- a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h +++ b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (C) 2005-2019 Andes Technology Corporation */ #ifndef _FP_UDF_IEX_CRTL_H #define _FP_UDF_IEX_CRTL_H diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h index 2977534a6bd3..48d00328d328 100644 --- a/arch/nds32/include/uapi/asm/param.h +++ b/arch/nds32/include/uapi/asm/param.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __ASM_NDS32_PARAM_H diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h index 1a6e01c00e6f..d76217c7c010 100644 --- a/arch/nds32/include/uapi/asm/ptrace.h +++ b/arch/nds32/include/uapi/asm/ptrace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __UAPI_ASM_NDS32_PTRACE_H diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h index dc89af7ddcc3..6c1e6648878f 100644 --- a/arch/nds32/include/uapi/asm/sigcontext.h +++ b/arch/nds32/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef _ASMNDS32_SIGCONTEXT_H diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h index a0b2f7b9c0f2..410795e280fe 100644 --- a/arch/nds32/include/uapi/asm/unistd.h +++ b/arch/nds32/include/uapi/asm/unistd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology 
Corporation #define __ARCH_WANT_STAT64 diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 8acb8fa1f8d6..3b77d729057f 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -19,8 +19,6 @@ KBUILD_IMAGE := vmlinuz -KBUILD_DEFCONFIG := default_defconfig - NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) @@ -182,5 +180,8 @@ define archhelp @echo ' zinstall - Install compressed vmlinuz kernel' endef +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + archheaders: $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index 2da8624e5cf6..1e5879c6a752 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile @@ -12,6 +12,7 @@ UBSAN_SANITIZE := n targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 targets += misc.o piggy.o sizes.h head.o real2.o firmware.o +targets += real2.S firmware.c KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING @@ -55,7 +56,8 @@ $(obj)/misc.o: $(obj)/sizes.h CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER $(obj)/vmlinux.lds: $(obj)/sizes.h -$(obj)/vmlinux.bin: vmlinux +OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S +$(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) vmlinux.bin.all-y := $(obj)/vmlinux.bin diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S index bfd7872739a3..2ac3a643f2eb 100644 --- a/arch/parisc/boot/compressed/vmlinux.lds.S +++ b/arch/parisc/boot/compressed/vmlinux.lds.S @@ -48,8 +48,8 @@ SECTIONS *(.rodata.compressed) } - /* bootloader code and data starts behind area of extracted kernel */ - . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START); + /* bootloader code and data starts at least behind area of extracted kernel */ + . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START)); /* align on next page boundary */ . 
= ALIGN(4096); diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/defconfig index 5b877ca34ebf..5b877ca34ebf 100644 --- a/arch/parisc/configs/default_defconfig +++ b/arch/parisc/configs/defconfig diff --git a/arch/parisc/include/asm/kprobes.h b/arch/parisc/include/asm/kprobes.h index e09cf2deeafe..904034da4974 100644 --- a/arch/parisc/include/asm/kprobes.h +++ b/arch/parisc/include/asm/kprobes.h @@ -50,6 +50,10 @@ struct kprobe_ctlblk { int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs); int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs); +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + return 0; +} #endif /* CONFIG_KPROBES */ #endif /* _PARISC_KPROBES_H */ diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index d784ccdd8fef..b6fb30f2e4bf 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -181,8 +181,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, for (i = 0; i < ARRAY_SIZE(insn); i++) insn[i] = INSN_NOP; + __patch_text((void *)rec->ip, INSN_NOP); __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn), - insn, sizeof(insn)); + insn, sizeof(insn)-4); return 0; } #endif diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index ba67893a1d72..df46b0e5a915 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -63,7 +63,7 @@ ENTRY_CFI(flush_tlb_all_local) /* Flush Instruction Tlb */ - LDREG ITLB_SID_BASE(%r1), %r20 +88: LDREG ITLB_SID_BASE(%r1), %r20 LDREG ITLB_SID_STRIDE(%r1), %r21 LDREG ITLB_SID_COUNT(%r1), %r22 LDREG ITLB_OFF_BASE(%r1), %arg0 @@ -103,6 +103,7 @@ fitonemiddle: /* Loop if LOOP = 1 */ add %r21, %r20, %r20 /* increment space */ fitdone: + ALTERNATIVE(88b, fitdone, ALT_COND_NO_SPLIT_TLB, INSN_NOP) /* Flush Data Tlb */ diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile index b6c4b254901a..55c1396580a4 100644 --- a/arch/parisc/math-emu/Makefile +++ b/arch/parisc/math-emu/Makefile @@ -18,3 +18,4 @@ obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \ # other very old or stripped-down PA-RISC CPUs -- not currently supported obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o +CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough=3 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 6dd4669ce7a5..adbd5e2144a3 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -66,6 +66,7 @@ parisc_acctyp(unsigned long code, unsigned int inst) case 0x30000000: /* coproc2 */ if (bit22set(inst)) return VM_WRITE; + /* fall through */ case 0x0: /* indexed/memory management */ if (bit22set(inst)) { diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d8dcd8820369..77f6ebf97113 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -121,6 +121,7 @@ config PPC select ARCH_32BIT_OFF_T if PPC32 select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_MMAP_PGPROT select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 463c63a9fcf1..11112023e327 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -302,9 +302,14 @@ #define H_SCM_UNBIND_MEM 0x3F0 #define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4 #define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8 -#define H_SCM_MEM_QUERY 0x3FC -#define H_SCM_BLOCK_CLEAR 0x400 -#define MAX_HCALL_OPCODE H_SCM_BLOCK_CLEAR 
+#define H_SCM_UNBIND_ALL 0x3FC +#define H_SCM_HEALTH 0x400 +#define H_SCM_PERFORMANCE_STATS 0x418 +#define MAX_HCALL_OPCODE H_SCM_PERFORMANCE_STATS + +/* Scope args for H_SCM_UNBIND_ALL */ +#define H_UNBIND_SCOPE_ALL (0x1) +#define H_UNBIND_SCOPE_DRC (0x2) /* H_VIOCTL functions */ #define H_GET_VIOA_DUMP_SIZE 0x01 diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h index dc9a1ca70edf..c6bbe9778d3c 100644 --- a/arch/powerpc/include/asm/pmc.h +++ b/arch/powerpc/include/asm/pmc.h @@ -27,11 +27,10 @@ static inline void ppc_set_pmu_inuse(int inuse) #ifdef CONFIG_PPC_PSERIES get_lppaca()->pmcregs_in_use = inuse; #endif - } else { + } #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - get_paca()->pmcregs_in_use = inuse; + get_paca()->pmcregs_in_use = inuse; #endif - } #endif } diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 68473c3c471c..b0720c7c3fcf 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -49,6 +49,7 @@ #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h index b551b741653d..5e1e648aeec4 100644 --- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h +++ b/arch/powerpc/include/uapi/asm/bpf_perf_event.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ #define _UAPI__ASM_BPF_PERF_EVENT_H__ diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h index 01555c6ae0f5..be48c2215fa2 100644 --- a/arch/powerpc/include/uapi/asm/kvm_para.h +++ b/arch/powerpc/include/uapi/asm/kvm_para.h @@ -31,7 +31,7 @@ * Struct fields are always 32 or 64 bit aligned, depending on them being 32 * or 64 bit wide respectively. 
* - * See Documentation/virtual/kvm/ppc-pv.txt + * See Documentation/virt/kvm/ppc-pv.txt */ struct kvm_vcpu_arch_shared { __u64 scratch1; diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 56dfa7a2a6f2..ea0c69236789 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -49,7 +49,8 @@ obj-y := cputable.o ptrace.o syscalls.o \ signal.o sysfs.o cacheinfo.o time.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ - of_platform.o prom_parse.o + of_platform.o prom_parse.o \ + dma-common.o obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ signal_64.o ptrace32.o \ paca.o nvram_64.o firmware.o diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 7107ad86de65..92045ed64976 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -176,9 +176,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, ret |= __get_user_inatomic(temp.v[1], p++); ret |= __get_user_inatomic(temp.v[2], p++); ret |= __get_user_inatomic(temp.v[3], p++); + /* fall through */ case 4: ret |= __get_user_inatomic(temp.v[4], p++); ret |= __get_user_inatomic(temp.v[5], p++); + /* fall through */ case 2: ret |= __get_user_inatomic(temp.v[6], p++); ret |= __get_user_inatomic(temp.v[7], p++); @@ -259,9 +261,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, ret |= __put_user_inatomic(data.v[1], p++); ret |= __put_user_inatomic(data.v[2], p++); ret |= __put_user_inatomic(data.v[3], p++); + /* fall through */ case 4: ret |= __put_user_inatomic(data.v[4], p++); ret |= __put_user_inatomic(data.v[5], p++); + /* fall through */ case 2: ret |= __put_user_inatomic(data.v[6], p++); ret |= __put_user_inatomic(data.v[7], p++); diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c new file mode 100644 index 000000000000..dc7ef6b17b69 --- /dev/null +++ b/arch/powerpc/kernel/dma-common.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Contains common dma routines for all powerpc platforms. + * + * Copyright (C) 2019 Shawn Anastasio. 
+ */ + +#include <linux/mm.h> +#include <linux/dma-noncoherent.h> + +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, + unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev)) + return pgprot_noncached(prot); + return prot; +} diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 85fdb6d879f1..54fab22c9a43 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -597,6 +597,14 @@ ppc_clone: stw r0,_TRAP(r1) /* register set saved */ b sys_clone + .globl ppc_clone3 +ppc_clone3: + SAVE_NVGPRS(r1) + lwz r0,_TRAP(r1) + rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ + stw r0,_TRAP(r1) /* register set saved */ + b sys_clone3 + .globl ppc_swapcontext ppc_swapcontext: SAVE_NVGPRS(r1) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index d9105fcf4021..0a0b5310f54a 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -487,6 +487,11 @@ _GLOBAL(ppc_clone) bl sys_clone b .Lsyscall_exit +_GLOBAL(ppc_clone3) + bl save_nvgprs + bl sys_clone3 + b .Lsyscall_exit + _GLOBAL(ppc32_swapcontext) bl save_nvgprs bl compat_sys_swapcontext diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index eee5bef736c8..6ba3cc2ef8ab 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1531,7 +1531,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) * * Call convention: * - * syscall register convention is in Documentation/powerpc/syscall64-abi.txt + * syscall register convention is in Documentation/powerpc/syscall64-abi.rst * * For hypercalls, the register convention is as follows: * r0 volatile diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index f50b708d6d77..98600b276f76 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1198,6 +1198,9 @@ SYSCALL_DEFINE0(rt_sigreturn) goto bad; if (MSR_TM_ACTIVE(msr_hi<<32)) { + /* Trying to start TM on non TM system */ + if (!cpu_has_feature(CPU_FTR_TM)) + goto bad; /* We only recheckpoint on return if we're * transaction. */ diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 2f80e270c7b0..117515564ec7 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -771,6 +771,11 @@ SYSCALL_DEFINE0(rt_sigreturn) if (MSR_TM_ACTIVE(msr)) { /* We recheckpoint on return. 
*/ struct ucontext __user *uc_transact; + + /* Trying to start TM on non TM system */ + if (!cpu_has_feature(CPU_FTR_TM)) + goto badframe; + if (__get_user(uc_transact, &uc->uc_link)) goto badframe; if (restore_tm_sigcontexts(current, &uc->uc_mcontext, diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 3331749aab20..43f736ed47f2 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -516,4 +516,4 @@ 432 common fsmount sys_fsmount 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open -# 435 reserved for clone3 +435 nospu clone3 ppc_clone3 diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 653936177857..18f244aad7aa 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -239,6 +239,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, case 2: case 6: pte->may_write = true; + /* fall through */ case 3: case 5: case 7: diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index ec1804f822af..cde3f5a4b3e4 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3569,9 +3569,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); if (kvmhv_on_pseries()) { + /* + * We need to save and restore the guest visible part of the + * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor + * doesn't do this for us. Note only required if pseries since + * this is done in kvmhv_load_hv_regs_and_go() below otherwise. + */ + unsigned long host_psscr; /* call our hypervisor to load up HV regs and go */ struct hv_guest_state hvregs; + host_psscr = mfspr(SPRN_PSSCR_PR); + mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); kvmhv_save_hv_regs(vcpu, &hvregs); hvregs.lpcr = lpcr; vcpu->arch.regs.msr = vcpu->arch.shregs.msr; @@ -3590,6 +3599,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->arch.shregs.msr = vcpu->arch.regs.msr; vcpu->arch.shregs.dar = mfspr(SPRN_DAR); vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); + vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); + mtspr(SPRN_PSSCR_PR, host_psscr); /* H_CEDE has to be handled now, not later */ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && @@ -3654,6 +3665,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->arch.vpa.dirty = 1; save_pmu = lp->pmcregs_in_use; } + /* Must save pmu if this guest is capable of running nested guests */ + save_pmu |= nesting_enabled(vcpu->kvm); kvmhv_save_guest_pmu(vcpu, save_pmu); diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 6ca0d7376a9f..e3ba67095895 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1986,10 +1986,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) xive->single_escalation = xive_native_has_single_escalation(); - if (ret) { - kfree(xive); + if (ret) return ret; - } return 0; } diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 5596c8ec221a..a998823f68a3 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -1090,9 +1090,9 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) xive->ops = &kvmppc_xive_native_ops; if (ret) - kfree(xive); + return ret; - return ret; + return 0; } /* diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c 
index 9a5963e07a82..b8ad14bb1170 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1899,11 +1899,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, * * For guests on platforms before POWER9, we clamp the it limit to 1G * to avoid some funky things such as RTAS bugs etc... + * + * On POWER9 we limit to 1TB in case the host erroneously told us that + * the RMA was >1TB. Effective address bits 0:23 are treated as zero + * (meaning the access is aliased to zero i.e. addr = addr % 1TB) + * for virtual real mode addressing and so it doesn't make sense to + * have an area larger than 1TB as it can't be addressed. */ if (!early_cpu_has_feature(CPU_FTR_HVMODE)) { ppc64_rma_size = first_memblock_size; if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000); + else + ppc64_rma_size = min_t(u64, ppc64_rma_size, + 1UL << SID_SHIFT_1T); /* Finally limit subsequent allocations */ memblock_set_current_limit(ppc64_rma_size); diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 0d62be3cba47..74f4555a62ba 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -21,7 +21,7 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot) __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0); } -static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end) +static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end) { pmd_t *pmd; unsigned long k_cur, k_next; @@ -35,7 +35,10 @@ static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_ if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte) continue; - new = pte_alloc_one_kernel(&init_mm); + if (slab_is_available()) + new = pte_alloc_one_kernel(&init_mm); + else + new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); if (!new) return -ENOMEM; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 9259337d7374..9191a66b3bc5 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -239,7 +239,7 @@ void __init paging_init(void) #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = min(max_low_pfn, - ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT); + 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT)); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index c8ec670ee924..a5ac371a3f06 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -11,6 +11,7 @@ #include <linux/sched.h> #include <linux/libnvdimm.h> #include <linux/platform_device.h> +#include <linux/delay.h> #include <asm/plpar_wrappers.h> @@ -43,8 +44,9 @@ struct papr_scm_priv { static int drc_pmem_bind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; - uint64_t rc, token; uint64_t saved = 0; + uint64_t token; + int64_t rc; /* * When the hypervisor cannot map all the requested memory in a single @@ -64,6 +66,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p) } while (rc == H_BUSY); if (rc) { + /* H_OVERLAP needs a separate error path */ + if (rc == H_OVERLAP) + return -EBUSY; + dev_err(&p->pdev->dev, "bind err: %lld\n", rc); return -ENXIO; } @@ -78,22 +84,36 @@ static int drc_pmem_bind(struct papr_scm_priv *p) static int drc_pmem_unbind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; - uint64_t rc, token; + 
uint64_t token = 0; + int64_t rc; - token = 0; + dev_dbg(&p->pdev->dev, "unbind drc %x\n", p->drc_index); - /* NB: unbind has the same retry requirements mentioned above */ + /* NB: unbind has the same retry requirements as drc_pmem_bind() */ do { - rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index, - p->bound_addr, p->blocks, token); + + /* Unbind of all SCM resources associated with drcIndex */ + rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC, + p->drc_index, token); token = ret[0]; - cond_resched(); + + /* Check if we are stalled for some time */ + if (H_IS_LONG_BUSY(rc)) { + msleep(get_longbusy_msecs(rc)); + rc = H_BUSY; + } else if (rc == H_BUSY) { + cond_resched(); + } + } while (rc == H_BUSY); if (rc) dev_err(&p->pdev->dev, "unbind error: %lld\n", rc); + else + dev_dbg(&p->pdev->dev, "unbind drc %x complete\n", + p->drc_index); - return !!rc; + return rc == H_SUCCESS ? 0 : -ENXIO; } static int papr_scm_meta_get(struct papr_scm_priv *p, @@ -255,12 +275,32 @@ static const struct attribute_group *papr_scm_dimm_groups[] = { NULL, }; +static inline int papr_scm_node(int node) +{ + int min_dist = INT_MAX, dist; + int nid, min_node; + + if ((node == NUMA_NO_NODE) || node_online(node)) + return node; + + min_node = first_online_node; + for_each_online_node(nid) { + dist = node_distance(node, nid); + if (dist < min_dist) { + min_dist = dist; + min_node = nid; + } + } + return min_node; +} + static int papr_scm_nvdimm_init(struct papr_scm_priv *p) { struct device *dev = &p->pdev->dev; struct nd_mapping_desc mapping; struct nd_region_desc ndr_desc; unsigned long dimm_flags; + int target_nid, online_nid; p->bus_desc.ndctl = papr_scm_ndctl; p->bus_desc.module = THIS_MODULE; @@ -299,8 +339,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; - ndr_desc.numa_node = dev_to_node(&p->pdev->dev); - ndr_desc.target_node = ndr_desc.numa_node; + target_nid = dev_to_node(&p->pdev->dev); + online_nid = papr_scm_node(target_nid); + ndr_desc.numa_node = online_nid; + ndr_desc.target_node = target_nid; ndr_desc.res = &p->res; ndr_desc.of_node = p->dn; ndr_desc.provider_data = p; @@ -318,6 +360,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) ndr_desc.res, p->dn); goto err; } + if (target_nid != online_nid) + dev_info(dev, "Region registered with target node %d and online node %d", + target_nid, online_nid); return 0; @@ -389,6 +434,14 @@ static int papr_scm_probe(struct platform_device *pdev) /* request the hypervisor to bind this region to somewhere in memory */ rc = drc_pmem_bind(p); + + /* If phyp says drc memory still bound then force unbound and retry */ + if (rc == -EBUSY) { + dev_warn(&pdev->dev, "Retrying bind after unbinding\n"); + drc_pmem_unbind(p); + rc = drc_pmem_bind(p); + } + if (rc) goto err; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 082c7e1c20f0..1cdb39575eae 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -479,7 +479,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask, * Now go through the entire mask until we find a valid * target. 
*/ - for (;;) { + do { /* * We re-check online as the fallback case passes us * an untested affinity mask @@ -487,12 +487,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask, if (cpu_online(cpu) && xive_try_pick_target(cpu)) return cpu; cpu = cpumask_next(cpu, mask); - if (cpu == first) - break; /* Wrap around */ if (cpu >= nr_cpu_ids) cpu = cpumask_first(mask); - } + } while (cpu != first); + return -1; } diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi index 40983491b95f..42b5ec223100 100644 --- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi @@ -21,7 +21,6 @@ cpus { #address-cells = <1>; #size-cells = <0>; - timebase-frequency = <1000000>; cpu0: cpu@0 { compatible = "sifive,e51", "sifive,rocket0", "riscv"; device_type = "cpu"; @@ -217,5 +216,20 @@ #size-cells = <0>; status = "disabled"; }; + eth0: ethernet@10090000 { + compatible = "sifive,fu540-c000-gem"; + interrupt-parent = <&plic0>; + interrupts = <53>; + reg = <0x0 0x10090000 0x0 0x2000 + 0x0 0x100a0000 0x0 0x1000>; + local-mac-address = [00 00 00 00 00 00]; + clock-names = "pclk", "hclk"; + clocks = <&prci PRCI_CLK_GEMGXLPLL>, + <&prci PRCI_CLK_GEMGXLPLL>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + }; }; diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index 0b55c53c08c7..93d68cbd64fe 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -76,3 +76,12 @@ disable-wp; }; }; + +ð0 { + status = "okay"; + phy-mode = "gmii"; + phy-handle = <&phy0>; + phy0: ethernet-phy@0 { + reg = <0>; + }; +}; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index b7b749b18853..93205c0bf71d 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PCIE_XILINX=y CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_LOOP=y CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_SD=y @@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_EARLYCON_RISCV_SBI=y CONFIG_HVC_RISCV_SBI=y +CONFIG_SPI=y +CONFIG_SPI_SIFIVE=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_DRM=y CONFIG_DRM_RADEON=y @@ -66,8 +69,9 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y +CONFIG_MMC=y +CONFIG_MMC_SPI=y CONFIG_VIRTIO_MMIO=y -CONFIG_SPI_SIFIVE=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_AUTOFS4_FS=y @@ -83,8 +87,4 @@ CONFIG_ROOT_NFS=y CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_DEV_VIRTIO=y CONFIG_PRINTK_TIME=y -CONFIG_SPI=y -CONFIG_MMC_SPI=y -CONFIG_MMC=y -CONFIG_DEVTMPFS_MOUNT=y # CONFIG_RCU_TRACE is not set diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 1efaeddf1e4b..16970f246860 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -22,6 +22,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mm-arch-hooks.h +generic-y += msi.h generic-y += percpu.h generic-y += preempt.h generic-y += sections.h diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h index 62716653554b..d86cb17bbabe 100644 --- a/arch/riscv/include/uapi/asm/auxvec.h +++ b/arch/riscv/include/uapi/asm/auxvec.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH 
Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 Regents of the University of California diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h index 0b9b58b57ff6..7d0b32e3b701 100644 --- a/arch/riscv/include/uapi/asm/bitsperlong.h +++ b/arch/riscv/include/uapi/asm/bitsperlong.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 Regents of the University of California diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h index 1920debc09c0..f671e16bf6af 100644 --- a/arch/riscv/include/uapi/asm/byteorder.h +++ b/arch/riscv/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 Regents of the University of California diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h index 7d786145183b..4e7646077056 100644 --- a/arch/riscv/include/uapi/asm/hwcap.h +++ b/arch/riscv/include/uapi/asm/hwcap.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copied from arch/arm64/include/asm/hwcap.h * diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h index 92d8f7cd8f84..882547f6bd5c 100644 --- a/arch/riscv/include/uapi/asm/ptrace.h +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 Regents of the University of California */ diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h index 053f809e52ce..84f2dfcfdbce 100644 --- a/arch/riscv/include/uapi/asm/sigcontext.h +++ b/arch/riscv/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 Regents of the University of California */ diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h index b58e00cee2ec..411dd7b52ed6 100644 --- a/arch/riscv/include/uapi/asm/ucontext.h +++ b/arch/riscv/include/uapi/asm/ucontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2017 SiFive, Inc. diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h index 0e2eeeb1fd27..13ce76cc5aff 100644 --- a/arch/riscv/include/uapi/asm/unistd.h +++ b/arch/riscv/include/uapi/asm/unistd.h @@ -18,6 +18,7 @@ #ifdef __LP64__ #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYS_CLONE3 #endif /* __LP64__ */ #include <asm-generic/unistd.h> diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index f1d6ffe43e42..49a5852fd07d 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE # these symbols in the kernel code rather than hand-coded addresses. 
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ - -Wl,--hash-style=both + -Wl,--build-id -Wl,--hash-style=both $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE $(call if_changed,vdsold) diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 7cba96e7587b..4cf0bddb7d92 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -36,7 +36,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o -obj-y += ctype.o text_dma.o +obj-y += version.o ctype.o text_dma.o obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h index ad57c2205a71..1c3b2b257637 100644 --- a/arch/s390/boot/boot.h +++ b/arch/s390/boot/boot.h @@ -8,10 +8,12 @@ void store_ipl_parmblock(void); void setup_boot_command_line(void); void parse_boot_command_line(void); void setup_memory_end(void); +void verify_facilities(void); void print_missing_facilities(void); unsigned long get_random_base(unsigned long safe_addr); extern int kaslr_enabled; +extern const char kernel_version[]; unsigned long read_ipl_report(unsigned long safe_offset); diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 028aab03a9e7..2087bed6e60f 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -361,6 +361,7 @@ ENTRY(startup_kdump) .quad 0 # INITRD_SIZE .quad 0 # OLDMEM_BASE .quad 0 # OLDMEM_SIZE + .quad kernel_version # points to kernel version string .org COMMAND_LINE .byte "root=/dev/ram0 ro" diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index 3bdd8132e56b..c34a6387ce38 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -7,6 +7,7 @@ #include <asm/timex.h> #include <asm/sclp.h> #include "compressed/decompressor.h" +#include "boot.h" #define PRNG_MODE_TDES 1 #define PRNG_MODE_SHA512 2 diff --git a/arch/s390/boot/version.c b/arch/s390/boot/version.c new file mode 100644 index 000000000000..d32e58bdda6a --- /dev/null +++ b/arch/s390/boot/version.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <generated/utsrelease.h> +#include <generated/compile.h> +#include "boot.h" + +const char kernel_version[] = UTS_RELEASE + " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") " UTS_VERSION; diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index e26d4413d34c..74e78ec5beb6 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -3,6 +3,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -18,55 +19,71 @@ CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set -CONFIG_CHECKPOINT_RESTORE=y CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y +CONFIG_LIVEPATCH=y +CONFIG_TUNE_ZEC12=y +CONFIG_NR_CPUS=512 +CONFIG_NUMA=y +CONFIG_HZ_100=y +CONFIG_KEXEC_FILE=y 
+CONFIG_EXPOLINE=y +CONFIG_EXPOLINE_AUTO=y +CONFIG_CHSC_SCH=y +CONFIG_VFIO_CCW=m +CONFIG_VFIO_AP=m +CONFIG_CRASH_DUMP=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_CMM=m +CONFIG_APPLDATA_BASE=y +CONFIG_KVM=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m CONFIG_OPROFILE=m CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y CONFIG_STATIC_KEYS_SELFTEST=y +CONFIG_REFCOUNT_FULL=y +CONFIG_LOCK_EVENT_COUNTS=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_SHA256=y CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y -CONFIG_BLK_WBT_SQ=y +CONFIG_BLK_CGROUP_IOLATENCY=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_BSD_DISKLABEL=y CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_DEFAULT_DEADLINE=y -CONFIG_LIVEPATCH=y -CONFIG_TUNE_ZEC12=y -CONFIG_NR_CPUS=512 -CONFIG_NUMA=y -CONFIG_PREEMPT=y -CONFIG_HZ_100=y -CONFIG_KEXEC_FILE=y -CONFIG_KEXEC_VERIFY_SIG=y -CONFIG_EXPOLINE=y -CONFIG_EXPOLINE_AUTO=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y @@ -82,17 +99,8 @@ CONFIG_ZSMALLOC=m CONFIG_ZSMALLOC_STAT=y CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_PCI=y -CONFIG_PCI_DEBUG=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_S390=y -CONFIG_CHSC_SCH=y -CONFIG_VFIO_AP=m -CONFIG_VFIO_CCW=m -CONFIG_CRASH_DUMP=y -CONFIG_BINFMT_MISC=m -CONFIG_HIBERNATION=y -CONFIG_PM_DEBUG=y +CONFIG_PERCPU_STATS=y +CONFIG_GUP_BENCHMARK=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -121,9 +129,6 @@ CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m CONFIG_TCP_CONG_ADVANCED=y @@ -139,10 +144,6 @@ CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_MIP6=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m CONFIG_IPV6_VTI=m CONFIG_IPV6_SIT=m CONFIG_IPV6_GRE=m @@ -264,11 +265,8 @@ CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NF_TABLES_ARP=y -CONFIG_NFT_CHAIN_NAT_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -287,10 +285,7 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -309,7 +304,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NF_TABLES_BRIDGE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m @@ -375,9 +370,11 @@ CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m +CONFIG_PCI=y +CONFIG_PCI_DEBUG=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_S390=y CONFIG_DEVTMPFS=y -CONFIG_DMA_CMA=y -CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CONNECTOR=y CONFIG_ZRAM=m CONFIG_BLK_DEV_LOOP=m @@ -395,7 +392,6 @@ CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m 
CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m @@ -415,17 +411,19 @@ CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=m CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BCACHE=m CONFIG_BLK_DEV_DM=m +CONFIG_DM_UNSTRIPED=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_WRITECACHE=m CONFIG_DM_MIRROR=m CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m @@ -445,23 +443,78 @@ CONFIG_EQUALIZER=m CONFIG_IFB=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m -CONFIG_VXLAN=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m CONFIG_NLMON=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_GOOGLE is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE_EN=y +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=m CONFIG_PPTP=m CONFIG_PPPOL2TP=m @@ -473,10 +526,13 @@ CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=0 +CONFIG_NULL_TTY=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y +CONFIG_PPS=m +# CONFIG_PTP_1588_CLOCK is not set # CONFIG_HWMON is not set CONFIG_WATCHDOG=y 
CONFIG_WATCHDOG_NOWAYOUT=y @@ -498,8 +554,8 @@ CONFIG_VFIO_MDEV_DEVICE=m CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y -CONFIG_S390_AP_IOMMU=y CONFIG_S390_CCW_IOMMU=y +CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -519,6 +575,7 @@ CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_BTRFS_DEBUG=y +CONFIG_BTRFS_ASSERT=y CONFIG_NILFS2_FS=m CONFIG_FS_DAX=y CONFIG_EXPORTFS_BLOCK_OPS=y @@ -552,8 +609,10 @@ CONFIG_ECRYPT_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y CONFIG_ROMFS_FS=m CONFIG_NFS_FS=m CONFIG_NFS_V3_ACL=y @@ -564,7 +623,6 @@ CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_CIFS=m -CONFIG_CIFS_STATS=y CONFIG_CIFS_STATS2=y CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_UPCALL=y @@ -580,19 +638,112 @@ CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_DLM=m +CONFIG_UNICODE=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_ENCRYPTED_KEYS=m +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_ECRDSA=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_AEGIS128=m +CONFIG_CRYPTO_AEGIS128L=m +CONFIG_CRYPTO_AEGIS256=m +CONFIG_CRYPTO_MORUS640=m +CONFIG_CRYPTO_MORUS1280=m +CONFIG_CRYPTO_CFB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_XXHASH=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_STATS=y +CONFIG_ZCRYPT=m +CONFIG_PKEY=m +CONFIG_CRYPTO_PAES_S390=m +CONFIG_CRYPTO_SHA1_S390=m +CONFIG_CRYPTO_SHA256_S390=m +CONFIG_CRYPTO_SHA512_S390=m +CONFIG_CRYPTO_DES_S390=m +CONFIG_CRYPTO_AES_S390=m +CONFIG_CRYPTO_GHASH_S390=m +CONFIG_CRYPTO_CRC32_S390=y +CONFIG_CORDIC=m +CONFIG_CRC32_SELFTEST=y +CONFIG_CRC4=m +CONFIG_CRC7=m +CONFIG_CRC8=m +CONFIG_RANDOM32_SELFTEST=y +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_DMA_API_DEBUG=y +CONFIG_STRING_SELFTEST=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_GDB_SCRIPTS=y CONFIG_FRAME_WARN=1024 -CONFIG_READABLE_ASM=y CONFIG_UNUSED_SYMBOLS=y CONFIG_HEADERS_INSTALL=y CONFIG_HEADERS_CHECK=y 
CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_PAGEALLOC=y +CONFIG_PAGE_OWNER=y CONFIG_DEBUG_RODATA_TEST=y CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_OBJECTS_SELFTEST=y @@ -645,7 +796,6 @@ CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y -CONFIG_DMA_API_DEBUG=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y CONFIG_TEST_SORT=y @@ -657,85 +807,3 @@ CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_S390_PTDUMP=y -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_BIG_KEYS=y -CONFIG_ENCRYPTED_KEYS=m -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_IMA=y -CONFIG_IMA_DEFAULT_HASH_SHA256=y -CONFIG_IMA_WRITE_POLICY=y -CONFIG_IMA_APPRAISE=y -CONFIG_CRYPTO_DH=m -CONFIG_CRYPTO_ECDH=m -CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_KEYWRAP=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES_TI=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_842=m -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m -CONFIG_ZCRYPT=m -CONFIG_PKEY=m -CONFIG_CRYPTO_PAES_S390=m -CONFIG_CRYPTO_SHA1_S390=m -CONFIG_CRYPTO_SHA256_S390=m -CONFIG_CRYPTO_SHA512_S390=m -CONFIG_CRYPTO_DES_S390=m -CONFIG_CRYPTO_AES_S390=m -CONFIG_CRYPTO_GHASH_S390=m -CONFIG_CRYPTO_CRC32_S390=y -CONFIG_PKCS7_MESSAGE_PARSER=y -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_CRC7=m -CONFIG_CRC8=m -CONFIG_RANDOM32_SELFTEST=y -CONFIG_CORDIC=m -CONFIG_CMM=m -CONFIG_APPLDATA_BASE=y -CONFIG_KVM=m -CONFIG_KVM_S390_UCONTROL=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_VSOCK=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index e4bc40073003..68d3ca83302b 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -12,30 +12,51 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y -# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set -CONFIG_CHECKPOINT_RESTORE=y CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y +CONFIG_LIVEPATCH=y +CONFIG_TUNE_ZEC12=y 
+CONFIG_NR_CPUS=512 +CONFIG_NUMA=y +# CONFIG_NUMA_EMU is not set +CONFIG_HZ_100=y +CONFIG_KEXEC_FILE=y +CONFIG_EXPOLINE=y +CONFIG_EXPOLINE_AUTO=y +CONFIG_CHSC_SCH=y +CONFIG_VFIO_CCW=m +CONFIG_VFIO_AP=m +CONFIG_CRASH_DUMP=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_CMM=m +CONFIG_APPLDATA_BASE=y +CONFIG_KVM=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m CONFIG_OPROFILE=m CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y @@ -47,27 +68,18 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_SHA256=y -CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y -CONFIG_BLK_WBT_SQ=y +CONFIG_BLK_CGROUP_IOLATENCY=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_BSD_DISKLABEL=y CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_DEFAULT_DEADLINE=y -CONFIG_LIVEPATCH=y -CONFIG_TUNE_ZEC12=y -CONFIG_NR_CPUS=512 -CONFIG_NUMA=y -CONFIG_HZ_100=y -CONFIG_KEXEC_FILE=y -CONFIG_KEXEC_VERIFY_SIG=y -CONFIG_EXPOLINE=y -CONFIG_EXPOLINE_AUTO=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y @@ -81,16 +93,8 @@ CONFIG_ZSMALLOC=m CONFIG_ZSMALLOC_STAT=y CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_PCI=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_S390=y -CONFIG_CHSC_SCH=y -CONFIG_VFIO_AP=m -CONFIG_VFIO_CCW=m -CONFIG_CRASH_DUMP=y -CONFIG_BINFMT_MISC=m -CONFIG_HIBERNATION=y -CONFIG_PM_DEBUG=y +CONFIG_PERCPU_STATS=y +CONFIG_GUP_BENCHMARK=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=m @@ -119,9 +123,6 @@ CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m CONFIG_TCP_CONG_ADVANCED=y @@ -137,10 +138,6 @@ CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_MIP6=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m CONFIG_IPV6_VTI=m CONFIG_IPV6_SIT=m CONFIG_IPV6_GRE=m @@ -262,11 +259,8 @@ CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NF_TABLES_ARP=y -CONFIG_NFT_CHAIN_NAT_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -285,10 +279,7 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -307,7 +298,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NF_TABLES_BRIDGE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m @@ -372,9 +363,11 @@ CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m +CONFIG_PCI=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_S390=y +CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y -CONFIG_DMA_CMA=y -CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CONNECTOR=y CONFIG_ZRAM=m CONFIG_BLK_DEV_LOOP=m @@ -383,6 +376,7 @@ CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 +# CONFIG_BLK_DEV_XPRAM is not set CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_RBD=m CONFIG_BLK_DEV_NVME=m @@ -392,7 +386,6 @@ 
CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m @@ -412,17 +405,19 @@ CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=m CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BCACHE=m CONFIG_BLK_DEV_DM=m +CONFIG_DM_UNSTRIPED=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_WRITECACHE=m CONFIG_DM_MIRROR=m CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m @@ -435,6 +430,7 @@ CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m CONFIG_DM_SWITCH=m +CONFIG_DM_INTEGRITY=m CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=m @@ -442,23 +438,78 @@ CONFIG_EQUALIZER=m CONFIG_IFB=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m -CONFIG_VXLAN=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m CONFIG_NLMON=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_GOOGLE is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE_EN=y +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=m CONFIG_PPTP=m CONFIG_PPPOL2TP=m @@ -470,17 +521,21 @@ CONFIG_INPUT_EVDEV=y # 
CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=0 +CONFIG_NULL_TTY=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y +# CONFIG_PTP_1588_CLOCK is not set # CONFIG_HWMON is not set CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m CONFIG_DIAG288_WATCHDOG=m CONFIG_DRM=y CONFIG_DRM_VIRTIO_GPU=y +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set CONFIG_FRAMEBUFFER_CONSOLE=y # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set @@ -495,8 +550,8 @@ CONFIG_VFIO_MDEV_DEVICE=m CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y -CONFIG_S390_AP_IOMMU=y CONFIG_S390_CCW_IOMMU=y +CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -546,8 +601,10 @@ CONFIG_ECRYPT_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y CONFIG_ROMFS_FS=m CONFIG_NFS_FS=m CONFIG_NFS_V3_ACL=y @@ -558,7 +615,6 @@ CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_CIFS=m -CONFIG_CIFS_STATS=y CONFIG_CIFS_STATS2=y CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_UPCALL=y @@ -574,31 +630,7 @@ CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_DLM=m -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_INFO_DWARF4=y -CONFIG_GDB_SCRIPTS=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_FRAME_WARN=1024 -CONFIG_UNUSED_SYMBOLS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_PANIC_ON_OOPS=y -CONFIG_RCU_TORTURE_TEST=m -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_LATENCYTOP=y -CONFIG_SCHED_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_FUNCTION_PROFILER=y -CONFIG_HIST_TRIGGERS=y -CONFIG_LKDTM=m -CONFIG_PERCPU_TEST=m -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_TEST_BPF=m -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_S390_PTDUMP=y +CONFIG_UNICODE=y CONFIG_PERSISTENT_KEYRINGS=y CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m @@ -606,7 +638,6 @@ CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_INTEGRITY_SIGNATURE=y CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y @@ -615,31 +646,42 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_DH=m -CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_ECRDSA=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_AEGIS128=m +CONFIG_CRYPTO_AEGIS128L=m +CONFIG_CRYPTO_AEGIS256=m +CONFIG_CRYPTO_MORUS640=m +CONFIG_CRYPTO_MORUS1280=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_XXHASH=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m @@ -649,16 +691,19 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m 
+CONFIG_CRYPTO_SM4=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_STATS=y CONFIG_ZCRYPT=m CONFIG_PKEY=m CONFIG_CRYPTO_PAES_S390=m @@ -669,12 +714,34 @@ CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y +CONFIG_CORDIC=m +CONFIG_CRC4=m CONFIG_CRC7=m CONFIG_CRC8=m -CONFIG_CORDIC=m -CONFIG_CMM=m -CONFIG_APPLDATA_BASE=y -CONFIG_KVM=m -CONFIG_KVM_S390_UCONTROL=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_VSOCK=m +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y +CONFIG_FRAME_WARN=1024 +CONFIG_UNUSED_SYMBOLS=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_LATENCYTOP=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_HIST_TRIGGERS=y +CONFIG_LKDTM=m +CONFIG_PERCPU_TEST=m +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_TEST_BPF=m +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_S390_PTDUMP=y diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index d92bab844b73..be09a208b608 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -1,27 +1,33 @@ # CONFIG_SWAP is not set CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +# CONFIG_CPU_ISOLATION is not set +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_COMPAT_BRK is not set -CONFIG_PARTITION_ADVANCED=y -CONFIG_IBM_PARTITION=y -CONFIG_DEFAULT_DEADLINE=y CONFIG_TUNE_ZEC12=y # CONFIG_COMPAT is not set CONFIG_NR_CPUS=2 -# CONFIG_HOTPLUG_CPU is not set CONFIG_HZ_100=y # CONFIG_ARCH_RANDOM is not set -# CONFIG_COMPACTION is not set -# CONFIG_MIGRATION is not set -# CONFIG_BOUNCE is not set -# CONFIG_CHECK_STACK is not set +# CONFIG_RELOCATABLE is not set # CONFIG_CHSC_SCH is not set # CONFIG_SCM_BUS is not set CONFIG_CRASH_DUMP=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_SECCOMP is not set +# CONFIG_PFAULT is not set +# CONFIG_S390_HYPFS_FS is not set +# CONFIG_VIRTUALIZATION is not set +# CONFIG_S390_GUEST is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_IBM_PARTITION=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_COMPACTION is not set +# CONFIG_MIGRATION is not set +# CONFIG_BOUNCE is not set CONFIG_NET=y # CONFIG_IUCV is not set CONFIG_DEVTMPFS=y @@ -43,7 +49,6 @@ CONFIG_ZFCP=y # CONFIG_HVC_IUCV is not set # CONFIG_HW_RANDOM_S390 is not set CONFIG_RAW_DRIVER=y -# CONFIG_SCLP_ASYNC is not set # CONFIG_HMC_DRV is not set # CONFIG_S390_TAPE is not set # CONFIG_VMCP is not set @@ -56,6 +61,7 @@ CONFIG_RAW_DRIVER=y CONFIG_CONFIGFS_FS=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set +# CONFIG_DIMLIB is not set CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y @@ -64,7 +70,4 @@ CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set -# CONFIG_PFAULT is not set -# CONFIG_S390_HYPFS_FS is not set -# CONFIG_VIRTUALIZATION is not set -# CONFIG_S390_GUEST is not set +# CONFIG_RUNTIME_TESTING_MENU is not set 
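The s390 bitops.h hunks below rename the public helpers (set_bit(), clear_bit(), test_and_set_bit(), ...) to arch_-prefixed variants, switch the test-and-* helpers to return bool, and then include <asm-generic/bitops-instrumented.h>. That generic header recreates the unprefixed names as thin wrappers that add sanitizer checks before delegating to the arch implementation. A minimal sketch of the wrapper pattern for one operation, assuming the generic header's usual KASAN-check form rather than quoting it verbatim:

/* Illustrative only: shape of the instrumented wrapper around arch_set_bit(). */
#include <linux/bits.h>
#include <linux/kasan-checks.h>

static inline void set_bit(long nr, volatile unsigned long *addr)
{
	/* Let KASAN see the word being written before the arch primitive runs. */
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch_set_bit(nr, addr);	/* the arch_-prefixed s390 implementation below */
}

Keeping the architecture code under arch_* names means every caller of set_bit() and friends picks up the instrumentation for free, without s390 open-coding the checks in each helper.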
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index 42f2375c203e..e1fcc03159ef 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -118,7 +118,7 @@ do { \ return PTR_ERR(rc); \ } while(0) -static int hpyfs_vm_create_guest(struct dentry *systems_dir, +static int hypfs_vm_create_guest(struct dentry *systems_dir, struct diag2fc_data *data) { char guest_name[NAME_LEN + 1] = {}; @@ -219,7 +219,7 @@ int hypfs_vm_create_files(struct dentry *root) } for (i = 0; i < count; i++) { - rc = hpyfs_vm_create_guest(dir, &(data[i])); + rc = hypfs_vm_create_guest(dir, &(data[i])); if (rc) goto failed; } diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 9900d655014c..b8833ac983fa 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -35,6 +35,7 @@ #include <linux/typecheck.h> #include <linux/compiler.h> +#include <linux/types.h> #include <asm/atomic_ops.h> #include <asm/barrier.h> @@ -55,7 +56,7 @@ __bitops_byte(unsigned long nr, volatile unsigned long *ptr) return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); } -static inline void set_bit(unsigned long nr, volatile unsigned long *ptr) +static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); unsigned long mask; @@ -76,7 +77,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr) __atomic64_or(mask, (long *)addr); } -static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr) +static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); unsigned long mask; @@ -97,7 +98,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr) __atomic64_and(mask, (long *)addr); } -static inline void change_bit(unsigned long nr, volatile unsigned long *ptr) +static inline void arch_change_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); unsigned long mask; @@ -118,8 +120,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr) __atomic64_xor(mask, (long *)addr); } -static inline int -test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) +static inline bool arch_test_and_set_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); unsigned long old, mask; @@ -129,8 +131,8 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) return (old & mask) != 0; } -static inline int -test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) +static inline bool arch_test_and_clear_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); unsigned long old, mask; @@ -140,8 +142,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) return (old & ~mask) != 0; } -static inline int -test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) +static inline bool arch_test_and_change_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned long *addr = __bitops_word(nr, ptr); unsigned long old, mask; @@ -151,30 +153,31 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) return (old & mask) != 0; } -static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) +static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned char *addr = __bitops_byte(nr, ptr); *addr |= 1 << (nr & 7); } -static 
inline void -__clear_bit(unsigned long nr, volatile unsigned long *ptr) +static inline void arch___clear_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned char *addr = __bitops_byte(nr, ptr); *addr &= ~(1 << (nr & 7)); } -static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) +static inline void arch___change_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned char *addr = __bitops_byte(nr, ptr); *addr ^= 1 << (nr & 7); } -static inline int -__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) +static inline bool arch___test_and_set_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned char *addr = __bitops_byte(nr, ptr); unsigned char ch; @@ -184,8 +187,8 @@ __test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) return (ch >> (nr & 7)) & 1; } -static inline int -__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) +static inline bool arch___test_and_clear_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned char *addr = __bitops_byte(nr, ptr); unsigned char ch; @@ -195,8 +198,8 @@ __test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) return (ch >> (nr & 7)) & 1; } -static inline int -__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) +static inline bool arch___test_and_change_bit(unsigned long nr, + volatile unsigned long *ptr) { unsigned char *addr = __bitops_byte(nr, ptr); unsigned char ch; @@ -206,7 +209,8 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) return (ch >> (nr & 7)) & 1; } -static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr) +static inline bool arch_test_bit(unsigned long nr, + const volatile unsigned long *ptr) { const volatile unsigned char *addr; @@ -215,28 +219,30 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr) return (*addr >> (nr & 7)) & 1; } -static inline int test_and_set_bit_lock(unsigned long nr, - volatile unsigned long *ptr) +static inline bool arch_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *ptr) { - if (test_bit(nr, ptr)) + if (arch_test_bit(nr, ptr)) return 1; - return test_and_set_bit(nr, ptr); + return arch_test_and_set_bit(nr, ptr); } -static inline void clear_bit_unlock(unsigned long nr, - volatile unsigned long *ptr) +static inline void arch_clear_bit_unlock(unsigned long nr, + volatile unsigned long *ptr) { smp_mb__before_atomic(); - clear_bit(nr, ptr); + arch_clear_bit(nr, ptr); } -static inline void __clear_bit_unlock(unsigned long nr, - volatile unsigned long *ptr) +static inline void arch___clear_bit_unlock(unsigned long nr, + volatile unsigned long *ptr) { smp_mb(); - __clear_bit(nr, ptr); + arch___clear_bit(nr, ptr); } +#include <asm-generic/bitops-instrumented.h> + /* * Functions which use MSB0 bit numbering. 
* The bits are numbered: @@ -261,7 +267,8 @@ static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); } -static inline int test_and_clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) +static inline bool test_and_clear_bit_inv(unsigned long nr, + volatile unsigned long *ptr) { return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); } @@ -276,8 +283,8 @@ static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); } -static inline int test_bit_inv(unsigned long nr, - const volatile unsigned long *ptr) +static inline bool test_bit_inv(unsigned long nr, + const volatile unsigned long *ptr) { return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); } diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index a4d38092530a..823578c6b9e2 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -177,6 +177,8 @@ static inline int devmem_is_allowed(unsigned long pfn) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define ARCH_ZONE_DMA_BITS 31 + #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index db5ef22c46e4..f647d565bd6d 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -28,7 +28,7 @@ * @sliba: storage list information block address * @sla: storage list address * @slsba: storage list state block address - * @akey: access key for DLIB + * @akey: access key for SLIB * @bkey: access key for SL * @ckey: access key for SBALs * @dkey: access key for SLSB @@ -50,11 +50,10 @@ struct qdesfmt0 { /** * struct qdr - queue description record (QDR) * @qfmt: queue format - * @pfmt: implementation dependent parameter format * @ac: adapter characteristics * @iqdcnt: input queue descriptor count * @oqdcnt: output queue descriptor count - * @iqdsz: inpout queue descriptor size + * @iqdsz: input queue descriptor size * @oqdsz: output queue descriptor size * @qiba: queue information block address * @qkey: queue information block key @@ -62,8 +61,7 @@ struct qdesfmt0 { */ struct qdr { u32 qfmt : 8; - u32 pfmt : 8; - u32 : 8; + u32 : 16; u32 ac : 8; u32 : 8; u32 iqdcnt : 8; @@ -327,6 +325,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, * struct qdio_initialize - qdio initialization data * @cdev: associated ccw device * @q_format: queue format + * @qdr_ac: feature flags to set * @adapter_name: name for the adapter * @qib_param_field_format: format for qib_parm_field * @qib_param_field: pointer to 128 bytes or NULL, if no param field @@ -338,6 +337,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, * @input_handler: handler to be called for input queues * @output_handler: handler to be called for output queues * @queue_start_poll_array: polling handlers (one per input queue or NULL) + * @scan_threshold: # of in-use buffers that triggers scan on output queue * @int_parm: interruption parameter * @input_sbal_addr_array: address of no_input_qs * 128 pointers * @output_sbal_addr_array: address of no_output_qs * 128 pointers diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 82deb8fc8319..70bd65724ec4 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -54,6 +54,7 @@ #define INITRD_SIZE_OFFSET 0x10410 #define OLDMEM_BASE_OFFSET 0x10418 #define OLDMEM_SIZE_OFFSET 
0x10420 +#define KERNEL_VERSION_OFFSET 0x10428 #define COMMAND_LINE_OFFSET 0x10480 #ifndef __ASSEMBLY__ @@ -74,7 +75,8 @@ struct parmarea { unsigned long initrd_size; /* 0x10410 */ unsigned long oldmem_base; /* 0x10418 */ unsigned long oldmem_size; /* 0x10420 */ - char pad1[0x10480 - 0x10428]; /* 0x10428 - 0x10480 */ + unsigned long kernel_version; /* 0x10428 */ + char pad1[0x10480 - 0x10430]; /* 0x10430 - 0x10480 */ char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */ }; @@ -82,6 +84,7 @@ extern int noexec_disabled; extern int memory_end_set; extern unsigned long memory_end; extern unsigned long max_physmem_end; +extern unsigned long __swsusp_reset_dma; #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index b6755685c7b8..9e9f75ef046a 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -34,5 +34,6 @@ #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 #endif /* _ASM_S390_UNISTD_H_ */ diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h index cefe7c7cd4f6..3ed42ff6da94 100644 --- a/arch/s390/include/uapi/asm/bpf_perf_event.h +++ b/arch/s390/include/uapi/asm/bpf_perf_event.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ #define _UAPI__ASM_BPF_PERF_EVENT_H__ diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h index fd32b1cd80d2..451ba7d08905 100644 --- a/arch/s390/include/uapi/asm/ipl.h +++ b/arch/s390/include/uapi/asm/ipl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_S390_UAPI_IPL_H #define _ASM_S390_UAPI_IPL_H diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index 494c34c50716..8c5755f41dde 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -20,6 +20,7 @@ #include <linux/ioctl.h> #include <linux/compiler.h> +#include <linux/types.h> /* Name of the zcrypt device driver. 
*/ #define ZCRYPT_NAME "zcrypt" @@ -160,17 +161,17 @@ struct ica_xcRB { * @payload_len: Payload length */ struct ep11_cprb { - uint16_t cprb_len; + __u16 cprb_len; unsigned char cprb_ver_id; unsigned char pad_000[2]; unsigned char flags; unsigned char func_id[2]; - uint32_t source_id; - uint32_t target_id; - uint32_t ret_code; - uint32_t reserved1; - uint32_t reserved2; - uint32_t payload_len; + __u32 source_id; + __u32 target_id; + __u32 ret_code; + __u32 reserved1; + __u32 reserved2; + __u32 payload_len; } __attribute__((packed)); /** @@ -179,8 +180,8 @@ struct ep11_cprb { * @dom_id: Usage domain id */ struct ep11_target_dev { - uint16_t ap_id; - uint16_t dom_id; + __u16 ap_id; + __u16 dom_id; }; /** @@ -195,14 +196,14 @@ struct ep11_target_dev { * @resp: Addr to response block */ struct ep11_urb { - uint16_t targets_num; - uint64_t targets; - uint64_t weight; - uint64_t req_no; - uint64_t req_len; - uint64_t req; - uint64_t resp_len; - uint64_t resp; + __u16 targets_num; + __u64 targets; + __u64 weight; + __u64 req_no; + __u64 req_len; + __u64 req; + __u64 resp_len; + __u64 resp; } __attribute__((packed)); /** diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c index 1dded39239f8..3b664cb3ec4d 100644 --- a/arch/s390/kernel/machine_kexec_reloc.c +++ b/arch/s390/kernel/machine_kexec_reloc.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/elf.h> +#include <asm/kexec.h> int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, unsigned long addr) diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index d4e031f7b9c8..5f1fd1581330 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -34,7 +34,7 @@ struct cf_diag_csd { /* Counter set data per CPU */ unsigned char start[PAGE_SIZE]; /* Counter set at event start */ unsigned char data[PAGE_SIZE]; /* Counter set at event delete */ }; -DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd); +static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd); /* Counter sets are stored as data stream in a page sized memory buffer and * exported to user space via raw data attached to the event sample data. diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index a90d3e945445..3054e9c035a3 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -437,4 +437,4 @@ 432 common fsmount sys_fsmount sys_fsmount 433 common fspick sys_fspick sys_fspick 434 common pidfd_open sys_pidfd_open sys_pidfd_open -# 435 reserved for clone3 +435 common clone3 sys_clone3 sys_clone3 diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c index 96580590ccaf..29d9470dbceb 100644 --- a/arch/s390/lib/xor.c +++ b/arch/s390/lib/xor.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/export.h> #include <linux/raid/xor.h> +#include <asm/xor.h> static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 63507662828f..7b0bb475c166 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -327,6 +327,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access, case VM_FAULT_BADACCESS: if (access == VM_EXEC && signal_return(regs) == 0) break; + /* fallthrough */ case VM_FAULT_BADMAP: /* Bad memory access. Check if it is kernel or user space. 
*/ if (user_mode(regs)) { @@ -336,7 +337,9 @@ static noinline void do_fault_error(struct pt_regs *regs, int access, do_sigsegv(regs, si_code); break; } + /* fallthrough */ case VM_FAULT_BADCONTEXT: + /* fallthrough */ case VM_FAULT_PFAULT: do_no_context(regs); break; diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 1e668b95e0c6..39c3a6e3d262 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2424,8 +2424,8 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global); * This function is assumed to be called with the guest_table_lock * held. */ -bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp, - unsigned long gaddr) +static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp, + unsigned long gaddr) { if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) return false; diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 99e06213a22b..54fcdf66ae96 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -17,8 +17,6 @@ #ifdef CONFIG_PGSTE -static int page_table_allocate_pgste_min = 0; -static int page_table_allocate_pgste_max = 1; int page_table_allocate_pgste = 0; EXPORT_SYMBOL(page_table_allocate_pgste); @@ -29,8 +27,8 @@ static struct ctl_table page_table_sysctl[] = { .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, .proc_handler = proc_dointvec_minmax, - .extra1 = &page_table_allocate_pgste_min, - .extra2 = &page_table_allocate_pgste_max, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, { } }; diff --git a/arch/sh/include/uapi/asm/setup.h b/arch/sh/include/uapi/asm/setup.h index 1170dd2fb998..4bd19f80f9b0 100644 --- a/arch/sh/include/uapi/asm/setup.h +++ b/arch/sh/include/uapi/asm/setup.h @@ -1,2 +1,2 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #include <asm-generic/setup.h> diff --git a/arch/sh/include/uapi/asm/types.h b/arch/sh/include/uapi/asm/types.h index f83795fdc0da..68100e108ea6 100644 --- a/arch/sh/include/uapi/asm/types.h +++ b/arch/sh/include/uapi/asm/types.h @@ -1,2 +1,2 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #include <asm-generic/types.h> diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h index 64c67f2ea33f..0dace69058ab 100644 --- a/arch/sparc/include/uapi/asm/oradax.h +++ b/arch/sparc/include/uapi/asm/oradax.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ /* * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 
*/ diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 2bb986f305ac..4f86928246e7 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -1443,8 +1443,12 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, ENTRY(page_fault) ASM_CLAC - pushl $0; /* %gs's slot on the stack */ + pushl $do_page_fault + jmp common_exception_read_cr2 +END(page_fault) +common_exception_read_cr2: + /* the function address is in %gs's slot on the stack */ SAVE_ALL switch_stacks=1 skip_gs=1 ENCODE_FRAME_POINTER @@ -1452,6 +1456,7 @@ ENTRY(page_fault) /* fixup %gs */ GS_TO_REG %ecx + movl PT_GS(%esp), %edi REG_TO_PTGS %ecx SET_KERNEL_GS %ecx @@ -1463,9 +1468,9 @@ ENTRY(page_fault) TRACE_IRQS_OFF movl %esp, %eax # pt_regs pointer - call do_page_fault + CALL_NOSPEC %edi jmp ret_from_exception -END(page_fault) +END(common_exception_read_cr2) common_exception: /* the function address is in %gs's slot on the stack */ @@ -1595,7 +1600,7 @@ END(general_protection) ENTRY(async_page_fault) ASM_CLAC pushl $do_async_page_fault - jmp common_exception + jmp common_exception_read_cr2 END(async_page_fault) #endif diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9e911a96972b..648260b5f367 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -20,7 +20,6 @@ #include <asm/intel-family.h> #include <asm/apic.h> #include <asm/cpu_device_id.h> -#include <asm/hypervisor.h> #include "../perf_event.h" @@ -263,8 +262,8 @@ static struct event_constraint intel_icl_event_constraints[] = { }; static struct extra_reg intel_icl_extra_regs[] __read_mostly = { - INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0), - INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1), + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), EVENT_EXTRA_END @@ -4053,7 +4052,7 @@ static bool check_msr(unsigned long msr, u64 mask) * Disable the check for real HW, so we don't * mess with potentionaly enabled registers: */ - if (hypervisor_is_type(X86_HYPER_NATIVE)) + if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return true; /* @@ -4955,6 +4954,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SKYLAKE_X: pmem = true; + /* fall through */ case INTEL_FAM6_SKYLAKE_MOBILE: case INTEL_FAM6_SKYLAKE_DESKTOP: case INTEL_FAM6_KABYLAKE_MOBILE: @@ -5004,6 +5004,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ICELAKE_X: case INTEL_FAM6_ICELAKE_XEON_D: pmem = true; + /* fall through */ case INTEL_FAM6_ICELAKE_MOBILE: case INTEL_FAM6_ICELAKE_DESKTOP: x86_pmu.late_ack = true; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 2c8db2c19328..f1269e804e9b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -851,7 +851,7 @@ struct event_constraint intel_skl_pebs_event_constraints[] = { struct event_constraint intel_icl_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ - INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ diff --git a/arch/x86/include/asm/kvm_host.h 
b/arch/x86/include/asm/kvm_host.h index 8282b8d41209..7b0a4ee77313 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -607,15 +607,16 @@ struct kvm_vcpu_arch { /* * QEMU userspace and the guest each have their own FPU state. - * In vcpu_run, we switch between the user, maintained in the - * task_struct struct, and guest FPU contexts. While running a VCPU, - * the VCPU thread will have the guest FPU context. + * In vcpu_run, we switch between the user and guest FPU contexts. + * While running a VCPU, the VCPU thread will have the guest FPU + * context. * * Note that while the PKRU state lives inside the fpu registers, * it is switched out separately at VMENTER and VMEXIT time. The * "guest_fpu" state here contains the guest FPU context, with the * host PRKU bits. */ + struct fpu *user_fpu; struct fpu *guest_fpu; u64 xcr0; diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h index ae91429129a6..ba71a63cdac4 100644 --- a/arch/x86/include/asm/vdso/gettimeofday.h +++ b/arch/x86/include/asm/vdso/gettimeofday.h @@ -96,6 +96,8 @@ long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) #else +#define VDSO_HAS_32BIT_FALLBACK 1 + static __always_inline long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) { @@ -114,6 +116,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) } static __always_inline +long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + long ret; + + asm ( + "mov %%ebx, %%edx \n" + "mov %[clock], %%ebx \n" + "call __kernel_vsyscall \n" + "mov %%edx, %%ebx \n" + : "=a" (ret), "=m" (*_ts) + : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts) + : "edx"); + + return ret; +} + +static __always_inline long gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz) { @@ -148,6 +167,23 @@ clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) return ret; } +static __always_inline +long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + long ret; + + asm ( + "mov %%ebx, %%edx \n" + "mov %[clock], %%ebx \n" + "call __kernel_vsyscall \n" + "mov %%edx, %%ebx \n" + : "=a" (ret), "=m" (*_ts) + : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts) + : "edx"); + + return ret; +} + #endif #ifdef CONFIG_PARAVIRT_CLOCK diff --git a/arch/x86/include/uapi/asm/byteorder.h b/arch/x86/include/uapi/asm/byteorder.h index 484e3cfd7ef2..149143cab9ff 100644 --- a/arch/x86/include/uapi/asm/byteorder.h +++ b/arch/x86/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_BYTEORDER_H #define _ASM_X86_BYTEORDER_H diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h index 6ebaae90e207..8b2effe6efb8 100644 --- a/arch/x86/include/uapi/asm/hwcap2.h +++ b/arch/x86/include/uapi/asm/hwcap2.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_HWCAP2_H #define _ASM_X86_HWCAP2_H diff --git a/arch/x86/include/uapi/asm/sigcontext32.h b/arch/x86/include/uapi/asm/sigcontext32.h index 6b18e88de8a6..7114801d0499 100644 --- a/arch/x86/include/uapi/asm/sigcontext32.h +++ b/arch/x86/include/uapi/asm/sigcontext32.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_SIGCONTEXT32_H 
#define _ASM_X86_SIGCONTEXT32_H diff --git a/arch/x86/include/uapi/asm/types.h b/arch/x86/include/uapi/asm/types.h index df55e1ddb0c9..9d5c11a24279 100644 --- a/arch/x86/include/uapi/asm/types.h +++ b/arch/x86/include/uapi/asm/types.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_TYPES_H #define _ASM_X86_TYPES_H diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 66ca906aa790..801ecd1c3fd5 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1226,7 +1226,7 @@ static ssize_t l1tf_show_state(char *buf) static ssize_t mds_show_state(char *buf) { - if (!hypervisor_is_type(X86_HYPER_NATIVE)) { + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { return sprintf(buf, "%s; SMT Host state unknown\n", mds_strings[mds_mitigation]); } diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 6c4f01540833..6f6b1d04dadf 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -549,6 +549,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_CNL_IDS(&gen9_early_ops), INTEL_ICL_11_IDS(&gen11_early_ops), INTEL_EHL_IDS(&gen11_early_ops), + INTEL_TGL_12_IDS(&gen11_early_ops), }; struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0); diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index a6342c899be5..f3d3e9646a99 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -193,10 +193,10 @@ ENTRY(secondary_startup_64) /* Set up %gs. * - * The base of %gs always points to the bottom of the irqstack - * union. If the stack protector canary is enabled, it is - * located at %gs:40. Note that, on SMP, the boot cpu uses - * init data section till per cpu areas are set up. + * The base of %gs always points to fixed_percpu_data. If the + * stack protector canary is enabled, it is located at %gs:40. + * Note that, on SMP, the boot cpu uses init data section until + * the per cpu areas are set up. */ movl $MSR_GS_BASE,%ecx movl initial_gs(%rip),%eax diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index c43e96a938d0..c6f791bc481e 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -827,10 +827,6 @@ int __init hpet_enable(void) if (!hpet_cfg_working()) goto out_nohpet; - /* Validate that the counter is counting */ - if (!hpet_counting()) - goto out_nohpet; - /* * Read the period and check for a sane value: */ @@ -896,6 +892,14 @@ int __init hpet_enable(void) } hpet_print_config(); + /* + * Validate that the counter is counting. This needs to be done + * after sanitizing the config registers to properly deal with + * force enabled HPETs. 
+ */ + if (!hpet_counting()) + goto out_nohpet; + clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); if (id & HPET_ID_LEGSUP) { diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 4f36d3241faf..2d6898c2cb64 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) { int ret; - if (!access_ok(fp, sizeof(*frame))) + if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE)) return 0; ret = 1; diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index 8eb67a670b10..653b7f617b61 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = { {}, }; +/* + * Some devices have a portrait LCD but advertise a landscape resolution (and + * pitch). We simply swap width and height for these devices so that we can + * correctly deal with some of them coming with multiple resolutions. + */ +static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { + { + /* + * Lenovo MIIX310-10ICR, only some batches have the troublesome + * 800x1280 portrait screen. Luckily the portrait version has + * its own BIOS version, so we match on that. + */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"), + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"), + }, + }, + { + /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "Lenovo MIIX 320-10ICR"), + }, + }, + { + /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "Lenovo ideapad D330-10IGM"), + }, + }, + {}, +}; + __init void sysfb_apply_efi_quirks(void) { if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) dmi_check_system(efifb_dmi_system_table); + + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && + dmi_check_system(efifb_dmi_swap_width_height)) { + u16 temp = screen_info.lfb_width; + + screen_info.lfb_width = screen_info.lfb_height; + screen_info.lfb_height = temp; + screen_info.lfb_linelength = 4 * screen_info.lfb_width; + } } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8f72526e2f68..24843cf49579 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3466,7 +3466,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, /* * Currently, fast page fault only works for direct mapping * since the gfn is not stable for indirect shadow page. See - * Documentation/virtual/kvm/locking.txt to get more detail. + * Documentation/virt/kvm/locking.txt to get more detail. 
*/ fault_handled = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte, diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 19f69df96758..7eafc6907861 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2143,12 +2143,20 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) goto out; } + svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, + GFP_KERNEL_ACCOUNT); + if (!svm->vcpu.arch.user_fpu) { + printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n"); + err = -ENOMEM; + goto free_partial_svm; + } + svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL_ACCOUNT); if (!svm->vcpu.arch.guest_fpu) { printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); err = -ENOMEM; - goto free_partial_svm; + goto free_user_fpu; } err = kvm_vcpu_init(&svm->vcpu, kvm, id); @@ -2211,6 +2219,8 @@ uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); +free_user_fpu: + kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu); free_partial_svm: kmem_cache_free(kvm_vcpu_cache, svm); out: @@ -2241,6 +2251,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->nested.hsave)); __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); + kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu); kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); kmem_cache_free(kvm_vcpu_cache, svm); } diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 0f1378789bd0..ced9fba32598 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -220,6 +220,8 @@ static void free_nested(struct kvm_vcpu *vcpu) if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) return; + kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); + vmx->nested.vmxon = false; vmx->nested.smm.vmxon = false; free_vpid(vmx->nested.vpid02); @@ -232,7 +234,9 @@ static void free_nested(struct kvm_vcpu *vcpu) vmx->vmcs01.shadow_vmcs = NULL; } kfree(vmx->nested.cached_vmcs12); + vmx->nested.cached_vmcs12 = NULL; kfree(vmx->nested.cached_shadow_vmcs12); + vmx->nested.cached_shadow_vmcs12 = NULL; /* Unpin physical memory we referred to in the vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index a279447eb75b..074385c86c09 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6598,6 +6598,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu) free_loaded_vmcs(vmx->loaded_vmcs); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); + kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu); kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); kmem_cache_free(kvm_vcpu_cache, vmx); } @@ -6613,12 +6614,20 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (!vmx) return ERR_PTR(-ENOMEM); + vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, + GFP_KERNEL_ACCOUNT); + if (!vmx->vcpu.arch.user_fpu) { + printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n"); + err = -ENOMEM; + goto free_partial_vcpu; + } + vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL_ACCOUNT); if (!vmx->vcpu.arch.guest_fpu) { printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); err = -ENOMEM; - goto free_partial_vcpu; + goto free_user_fpu; } vmx->vpid = allocate_vpid(); @@ -6721,6 +6730,8 @@ uninit_vcpu: free_vcpu: free_vpid(vmx->vpid); kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); +free_user_fpu: + 
kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu); free_partial_vcpu: kmem_cache_free(kvm_vcpu_cache, vmx); return ERR_PTR(err); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 58305cf81182..c6d951cbd76c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3306,6 +3306,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_x86_ops->vcpu_load(vcpu, cpu); + fpregs_assert_state_consistent(); + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + switch_fpu_return(); + /* Apply any externally detected TSC adjustments (due to suspend) */ if (unlikely(vcpu->arch.tsc_offset_adjustment)) { adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); @@ -7202,7 +7206,7 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id) rcu_read_unlock(); - if (target) + if (target && READ_ONCE(target->ready)) kvm_vcpu_yield_to(target); } @@ -7242,6 +7246,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) break; case KVM_HC_KICK_CPU: kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); + kvm_sched_yield(vcpu->kvm, a1); ret = 0; break; #ifdef CONFIG_X86_64 @@ -7990,9 +7995,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu->vcpu_id); guest_enter_irqoff(); - fpregs_assert_state_consistent(); - if (test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_return(); + /* The preempt notifier should have taken care of the FPU already. */ + WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD)); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); @@ -8270,7 +8274,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { fpregs_lock(); - copy_fpregs_to_fpstate(¤t->thread.fpu); + copy_fpregs_to_fpstate(vcpu->arch.user_fpu); /* PKRU is separately restored in kvm_x86_ops->run. */ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, ~XFEATURE_MASK_PKRU); @@ -8287,7 +8291,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) fpregs_lock(); copy_fpregs_to_fpstate(vcpu->arch.guest_fpu); - copy_kernel_to_fpregs(¤t->thread.fpu.state); + copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); fpregs_mark_activate(); fpregs_unlock(); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6c46095cd0d9..9ceacd1156db 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -177,13 +177,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); - if (!pmd_present(*pmd_k)) - return NULL; - if (!pmd_present(*pmd)) + if (pmd_present(*pmd) != pmd_present(*pmd_k)) set_pmd(pmd, *pmd_k); + + if (!pmd_present(*pmd_k)) + return NULL; else - BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k)); return pmd_k; } @@ -203,17 +204,13 @@ void vmalloc_sync_all(void) spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { spinlock_t *pgt_lock; - pmd_t *ret; /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); - ret = vmalloc_sync_one(page_address(page), address); + vmalloc_sync_one(page_address(page), address); spin_unlock(pgt_lock); - - if (!ret) - break; } spin_unlock(&pgd_lock); } diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 60c220020054..80828b95a51f 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -14,6 +14,7 @@ #include <linux/linkage.h> #include <asm/asm-offsets.h> +#include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/coprocessor.h> #include <asm/thread_info.h> diff --git a/block/bfq-iosched.c 
b/block/bfq-iosched.c index 72860325245a..586fcfe227ea 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -3354,38 +3354,57 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) * there is no active group, then the primary expectation for * this device is probably a high throughput. * - * We are now left only with explaining the additional - * compound condition that is checked below for deciding - * whether the scenario is asymmetric. To explain this - * compound condition, we need to add that the function + * We are now left only with explaining the two sub-conditions in the + * additional compound condition that is checked below for deciding + * whether the scenario is asymmetric. To explain the first + * sub-condition, we need to add that the function * bfq_asymmetric_scenario checks the weights of only - * non-weight-raised queues, for efficiency reasons (see - * comments on bfq_weights_tree_add()). Then the fact that - * bfqq is weight-raised is checked explicitly here. More - * precisely, the compound condition below takes into account - * also the fact that, even if bfqq is being weight-raised, - * the scenario is still symmetric if all queues with requests - * waiting for completion happen to be - * weight-raised. Actually, we should be even more precise - * here, and differentiate between interactive weight raising - * and soft real-time weight raising. + * non-weight-raised queues, for efficiency reasons (see comments on + * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised + * is checked explicitly here. More precisely, the compound condition + * below takes into account also the fact that, even if bfqq is being + * weight-raised, the scenario is still symmetric if all queues with + * requests waiting for completion happen to be + * weight-raised. Actually, we should be even more precise here, and + * differentiate between interactive weight raising and soft real-time + * weight raising. + * + * The second sub-condition checked in the compound condition is + * whether there is a fair amount of already in-flight I/O not + * belonging to bfqq. If so, I/O dispatching is to be plugged, for the + * following reason. The drive may decide to serve in-flight + * non-bfqq's I/O requests before bfqq's ones, thereby delaying the + * arrival of new I/O requests for bfqq (recall that bfqq is sync). If + * I/O-dispatching is not plugged, then, while bfqq remains empty, a + * basically uncontrolled amount of I/O from other queues may be + * dispatched too, possibly causing the service of bfqq's I/O to be + * delayed even longer in the drive. This problem gets more and more + * serious as the speed and the queue depth of the drive grow, + * because, as these two quantities grow, the probability to find no + * queue busy but many requests in flight grows too. By contrast, + * plugging I/O dispatching minimizes the delay induced by already + * in-flight I/O, and enables bfqq to recover the bandwidth it may + * lose because of this delay. 
* * As a side note, it is worth considering that the above - * device-idling countermeasures may however fail in the - * following unlucky scenario: if idling is (correctly) - * disabled in a time period during which all symmetry - * sub-conditions hold, and hence the device is allowed to - * enqueue many requests, but at some later point in time some - * sub-condition stops to hold, then it may become impossible - * to let requests be served in the desired order until all - * the requests already queued in the device have been served. + * device-idling countermeasures may however fail in the following + * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled + * in a time period during which all symmetry sub-conditions hold, and + * therefore the device is allowed to enqueue many requests, but at + * some later point in time some sub-condition stops to hold, then it + * may become impossible to make requests be served in the desired + * order until all the requests already queued in the device have been + * served. The last sub-condition commented above somewhat mitigates + * this problem for weight-raised queues. */ static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, struct bfq_queue *bfqq) { return (bfqq->wr_coeff > 1 && - bfqd->wr_busy_queues < - bfq_tot_busy_queues(bfqd)) || + (bfqd->wr_busy_queues < + bfq_tot_busy_queues(bfqd) || + bfqd->rq_in_driver >= + bfqq->dispatched + 4)) || bfq_asymmetric_scenario(bfqd, bfqq); } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 24ed26957367..55a7dc227dfb 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -54,7 +54,7 @@ static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */ -static bool blkcg_debug_stats = false; +bool blkcg_debug_stats = false; static struct workqueue_struct *blkcg_punt_bio_wq; static bool blkcg_policy_enabled(struct request_queue *q, @@ -944,10 +944,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) dbytes, dios); } - if (!blkcg_debug_stats) - goto next; - - if (atomic_read(&blkg->use_delay)) { + if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) { has_stats = true; off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu", @@ -967,7 +964,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) has_stats = true; off += written; } -next: + if (has_stats) { if (off < size - 1) { off += scnprintf(buf+off, size-off, "\n"); diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index d973c38ee4fd..0fff7b56df0e 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -917,6 +917,9 @@ static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf, unsigned long long avg_lat; unsigned long long cur_win; + if (!blkcg_debug_stats) + return 0; + if (iolat->ssd) return iolatency_ssd_stat(iolat, buf, size); diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index cf22ab00fefb..126021fc3a11 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -61,15 +61,6 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now) e->type->ops.completed_request(rq, now); } -static inline void blk_mq_sched_started_request(struct request *rq) -{ - struct request_queue *q = rq->q; - struct elevator_queue *e = q->elevator; - - if (e && e->type->ops.started_request) - e->type->ops.started_request(rq); -} - static inline void blk_mq_sched_requeue_request(struct request *rq) { struct request_queue *q = rq->q; diff --git a/block/blk-mq.c b/block/blk-mq.c 
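Editorial aside: the reworked bfq comment and condition above add a second trigger for plugging I/O dispatch: even when all weights look symmetric, a weight-raised queue is protected once a fair amount of in-flight I/O does not belong to it (four or more requests beyond its own dispatched count). Reduced to a standalone predicate with hypothetical parameter names mirroring the bfq fields, the check reads:

#include <stdbool.h>

static bool needs_dispatch_plugging(unsigned int wr_coeff,
				    unsigned int wr_busy_queues,
				    unsigned int tot_busy_queues,
				    unsigned int rq_in_driver,
				    unsigned int queue_dispatched,
				    bool scenario_is_asymmetric)
{
	return (wr_coeff > 1 &&
		(wr_busy_queues < tot_busy_queues ||
		 rq_in_driver >= queue_dispatched + 4)) ||
	       scenario_is_asymmetric;
}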
index b038ec680e84..f78d3287dd82 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -669,8 +669,6 @@ void blk_mq_start_request(struct request *rq) { struct request_queue *q = rq->q; - blk_mq_sched_started_request(rq); - trace_block_rq_issue(q, rq); if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { @@ -1960,9 +1958,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) rq = blk_mq_get_request(q, bio, &data); if (unlikely(!rq)) { rq_qos_cleanup(q, bio); - if (bio->bi_opf & REQ_NOWAIT) + + cookie = BLK_QC_T_NONE; + if (bio->bi_opf & REQ_NOWAIT_INLINE) + cookie = BLK_QC_T_EAGAIN; + else if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); - return BLK_QC_T_NONE; + return cookie; } trace_block_getrq(q, bio, bio->bi_opf); diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index 659ccb8b693f..3954c0dc1443 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -202,6 +202,7 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr, return -1; data->got_token = true; + smp_wmb(); list_del_init(&curr->entry); wake_up_process(data->task); return 1; @@ -244,7 +245,9 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data, return; prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); + has_sleeper = !wq_has_single_sleeper(&rqw->wait); do { + /* The memory barrier in set_task_state saves us here. */ if (data.got_token) break; if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) { @@ -255,12 +258,14 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data, * which means we now have two. Put our local token * and wake anyone else potentially waiting for one. */ + smp_rmb(); if (data.got_token) cleanup_cb(rqw, private_data); break; } io_schedule(); - has_sleeper = false; + has_sleeper = true; + set_current_state(TASK_UNINTERRUPTIBLE); } while (1); finish_wait(&rqw->wait, &data.wq); } diff --git a/block/blk-settings.c b/block/blk-settings.c index 2ae348c101a0..2c1831207a8f 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -752,7 +752,8 @@ void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask) * page (which might not be idential to the Linux PAGE_SIZE). Because * of that they are not limited by our notion of "segment size". */ - q->limits.max_segment_size = UINT_MAX; + if (mask) + q->limits.max_segment_size = UINT_MAX; } EXPORT_SYMBOL(blk_queue_virt_boundary); diff --git a/block/genhd.c b/block/genhd.c index 97887e59f3b2..54f1f0d381f4 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1969,7 +1969,7 @@ static const struct attribute *disk_events_attrs[] = { * The default polling interval can be specified by the kernel * parameter block.events_dfl_poll_msecs which defaults to 0 * (disable). This can also be modified runtime by writing to - * /sys/module/block/events_dfl_poll_msecs. + * /sys/module/block/parameters/events_dfl_poll_msecs. 
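Editorial aside: the smp_wmb()/smp_rmb() pair added to the rq-qos wait path above orders the got_token store in the wake-up callback against the re-check done by a waiter that also won the inflight race; the waiter must reliably observe the token before deciding it now holds two slots and has to give one back. The shape of the pairing is sketched below with placeholder names, not the real block-layer helpers.

/* wake-up side: publish the token before waking the sleeper */
static void grant_token(struct waiter_data *data)
{
	data->got_token = true;
	smp_wmb();
	wake_up_process(data->task);
}

/* waiter side: after winning the inflight race, look again for a token */
static void waiter_recheck(struct waiter_data *data)
{
	if (acquire_inflight_slot()) {
		smp_rmb();
		if (data->got_token)
			release_one_slot();	/* raced: we now hold two */
	}
}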
*/ static int disk_events_set_dfl_poll_msecs(const char *val, const struct kernel_param *kp) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index d4551e33fa71..8569b79e8b58 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, /* Move to ITS specific data */ its = (struct acpi_iort_its_group *)node->node_data; - if (idx > its->its_count) { - dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n", + if (idx >= its->its_count) { + dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n", idx, its->its_count); return -ENXIO; } diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 28cffaaf9d82..f616b16c1f0b 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state) if (device->power.flags.power_resources) result = acpi_power_transition(device, target_state); } else { + int cur_state = device->power.state; + if (device->power.flags.power_resources) { result = acpi_power_transition(device, ACPI_STATE_D0); if (result) goto end; } - if (device->power.state == ACPI_STATE_D0) { + if (cur_state == ACPI_STATE_D0) { int psc; /* Nothing to do here if _PSC is not present. */ diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index c02fa27dd3f3..1413324982f0 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1282,7 +1282,7 @@ static ssize_t hw_error_scrub_store(struct device *dev, if (rc) return rc; - device_lock(dev); + nfit_device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); @@ -1299,7 +1299,7 @@ static ssize_t hw_error_scrub_store(struct device *dev, break; } } - device_unlock(dev); + nfit_device_unlock(dev); if (rc) return rc; return size; @@ -1319,7 +1319,7 @@ static ssize_t scrub_show(struct device *dev, ssize_t rc = -ENXIO; bool busy; - device_lock(dev); + nfit_device_lock(dev); nd_desc = dev_get_drvdata(dev); if (!nd_desc) { device_unlock(dev); @@ -1339,7 +1339,7 @@ static ssize_t scrub_show(struct device *dev, } mutex_unlock(&acpi_desc->init_mutex); - device_unlock(dev); + nfit_device_unlock(dev); return rc; } @@ -1356,14 +1356,14 @@ static ssize_t scrub_store(struct device *dev, if (val != 1) return -EINVAL; - device_lock(dev); + nfit_device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); } - device_unlock(dev); + nfit_device_unlock(dev); if (rc) return rc; return size; @@ -1749,9 +1749,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) struct acpi_device *adev = data; struct device *dev = &adev->dev; - device_lock(dev->parent); + nfit_device_lock(dev->parent); __acpi_nvdimm_notify(dev, event); - device_unlock(dev->parent); + nfit_device_unlock(dev->parent); } static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) @@ -3457,8 +3457,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) struct device *dev = acpi_desc->dev; /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ - device_lock(dev); - device_unlock(dev); + nfit_device_lock(dev); + nfit_device_unlock(dev); /* Bounce the init_mutex to complete initial registration */ mutex_lock(&acpi_desc->init_mutex); @@ -3602,8 +3602,8 @@ void acpi_nfit_shutdown(void *data) * 
acpi_nfit_ars_rescan() submissions have had a chance to * either submit or see ->cancel set. */ - device_lock(bus_dev); - device_unlock(bus_dev); + nfit_device_lock(bus_dev); + nfit_device_unlock(bus_dev); flush_workqueue(nfit_wq); } @@ -3746,9 +3746,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify); static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { - device_lock(&adev->dev); + nfit_device_lock(&adev->dev); __acpi_nfit_notify(&adev->dev, adev->handle, event); - device_unlock(&adev->dev); + nfit_device_unlock(&adev->dev); } static const struct acpi_device_id acpi_nfit_ids[] = { diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 6ee2b02af73e..24241941181c 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -312,6 +312,30 @@ static inline struct acpi_nfit_desc *to_acpi_desc( return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); } +#ifdef CONFIG_PROVE_LOCKING +static inline void nfit_device_lock(struct device *dev) +{ + device_lock(dev); + mutex_lock(&dev->lockdep_mutex); +} + +static inline void nfit_device_unlock(struct device *dev) +{ + mutex_unlock(&dev->lockdep_mutex); + device_unlock(dev); +} +#else +static inline void nfit_device_lock(struct device *dev) +{ + device_lock(dev); +} + +static inline void nfit_device_unlock(struct device *dev) +{ + device_unlock(dev); +} +#endif + const guid_t *to_nfit_uuid(enum nfit_uuids id); int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); void acpi_nfit_shutdown(void *data); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 0e28270b0fd8..aad6be5c0af0 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void) acpi_gpe_apply_masked_gpes(); acpi_update_all_gpes(); + /* + * Although we call __add_memory() that is documented to require the + * device_hotplug_lock, it is not necessary here because this is an + * early code when userspace or any other code path cannot trigger + * hotplug/hotunplug operations. + */ mutex_lock(&acpi_scan_lock); /* * Enumerate devices in the ACPI namespace. 
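Editorial aside: the nfit_device_lock()/nfit_device_unlock() helpers introduced above exist because dev->mutex is registered with lockdep_set_novalidate_class() and is therefore invisible to lockdep; on CONFIG_PROVE_LOCKING builds they take a second, purely diagnostic mutex immediately after the real device lock so ordering violations are still reported. The general idiom, sketched with placeholder names:

struct shadowed_lock {
	struct mutex real;	/* the lock lockdep cannot validate */
	struct mutex shadow;	/* taken only so lockdep sees the ordering */
};

static inline void shadowed_lock(struct shadowed_lock *l)
{
	mutex_lock(&l->real);
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		mutex_lock(&l->shadow);
}

static inline void shadowed_unlock(struct shadowed_lock *l)
{
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		mutex_unlock(&l->shadow);
	mutex_unlock(&l->real);
}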
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 38a59a630cd4..dc1c83eafc22 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2988,7 +2988,7 @@ static void binder_transaction(struct binder_proc *proc, else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); - if (target_node && target_proc == proc) { + if (target_node && target_proc->pid == proc->pid) { binder_user_error("%d:%d got transaction to context manager from process owning it\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; @@ -3239,7 +3239,8 @@ static void binder_transaction(struct binder_proc *proc, buffer_offset = off_start_offset; off_end_offset = off_start_offset + tr->offsets_size; sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); - sg_buf_end_offset = sg_buf_offset + extra_buffers_size; + sg_buf_end_offset = sg_buf_offset + extra_buffers_size - + ALIGN(secctx_sz, sizeof(u64)); off_min = 0; for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; buffer_offset += sizeof(binder_size_t)) { diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 72312ad2e142..9e9583a6bba9 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port, hpriv->phys[port] = NULL; rc = 0; break; + case -EPROBE_DEFER: + /* Do not complain yet */ + break; default: dev_err(dev, @@ -408,7 +411,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, hpriv->mmio = devm_ioremap_resource(dev, platform_get_resource(pdev, IORESOURCE_MEM, 0)); if (IS_ERR(hpriv->mmio)) { - dev_err(dev, "no mmio space\n"); rc = PTR_ERR(hpriv->mmio); goto err_out; } diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index 173e6f2dd9af..eefda51f97d3 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) unsigned int ret; struct rm_feature_desc *desc; struct ata_taskfile tf; - static const char cdb[] = { GPCMD_GET_CONFIGURATION, + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION, 2, /* only 1 feature descriptor requested */ 0, 3, /* 3, removable medium feature */ 0, 0, 0,/* reserved */ diff --git a/drivers/base/core.c b/drivers/base/core.c index da84a73f2ba6..636058bbf48a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1663,6 +1663,9 @@ void device_initialize(struct device *dev) kobject_init(&dev->kobj, &device_ktype); INIT_LIST_HEAD(&dev->dma_pools); mutex_init(&dev->mutex); +#ifdef CONFIG_PROVE_LOCKING + mutex_init(&dev->lockdep_mutex); +#endif lockdep_set_novalidate_class(&dev->mutex); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); @@ -2211,6 +2214,24 @@ void put_device(struct device *dev) } EXPORT_SYMBOL_GPL(put_device); +bool kill_device(struct device *dev) +{ + /* + * Require the device lock and set the "dead" flag to guarantee that + * the update behavior is consistent with the other bitfields near + * it and that we cannot have an asynchronous probe routine trying + * to run while we are tearing out the bus/class/sysfs from + * underneath the device. + */ + lockdep_assert_held(&dev->mutex); + + if (dev->p->dead) + return false; + dev->p->dead = true; + return true; +} +EXPORT_SYMBOL_GPL(kill_device); + /** * device_del - delete device from system. * @dev: device. 
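Editorial aside: kill_device() above turns "mark this device dead" into a claim that can succeed exactly once, and it expects the caller to already hold the device lock (lockdep_assert_held), so a bus-initiated unregister and a driver-initiated teardown cannot both proceed. The same claim-once idiom in a self-contained userspace form, with hypothetical names and the lock taken inside the helper for brevity:

#include <pthread.h>
#include <stdbool.h>

struct object {
	pthread_mutex_t lock;
	bool dead;
};

/* Return true only for the single caller that actually marked it dead. */
static bool object_kill(struct object *obj)
{
	bool claimed;

	pthread_mutex_lock(&obj->lock);
	claimed = !obj->dead;
	obj->dead = true;
	pthread_mutex_unlock(&obj->lock);
	return claimed;
}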
@@ -2230,15 +2251,8 @@ void device_del(struct device *dev) struct kobject *glue_dir = NULL; struct class_interface *class_intf; - /* - * Hold the device lock and set the "dead" flag to guarantee that - * the update behavior is consistent with the other bitfields near - * it and that we cannot have an asynchronous probe routine trying - * to run while we are tearing out the bus/class/sysfs from - * underneath the device. - */ device_lock(dev); - dev->p->dead = true; + kill_device(dev); device_unlock(dev); /* Notify clients of device removal. This call must come diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 7048a41973ed..7ecd590e67fe 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -141,8 +141,8 @@ int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed); int fw_map_paged_buf(struct fw_priv *fw_priv); #else static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} -int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } -int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } +static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } +static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } #endif #endif /* __FIRMWARE_LOADER_H */ diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 85f20e371f2f..bd7d3bb8b890 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, /* MSch: invalidate default_params */ default_params[drive].blocks = 0; set_capacity(floppy->disk, MAX_DISK_SIZE * 2); + /* Fall through */ case FDFMTEND: case FDFLUSH: /* invalidate the buffer track to force a reread */ diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 90ebfcae0ce6..2b3103c30857 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection) unsigned int key_len; char secret[SHARED_SECRET_MAX]; /* 64 byte */ unsigned int resp_size; - SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm); + struct shash_desc *desc; struct packet_info pi; struct net_conf *nc; int err, rv; @@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection) memcpy(secret, nc->shared_secret, key_len); rcu_read_unlock(); + desc = kmalloc(sizeof(struct shash_desc) + + crypto_shash_descsize(connection->cram_hmac_tfm), + GFP_KERNEL); + if (!desc) { + rv = -1; + goto fail; + } desc->tfm = connection->cram_hmac_tfm; rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); @@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection) kfree(peers_ch); kfree(response); kfree(right_response); - shash_desc_zero(desc); + if (desc) { + shash_desc_zero(desc); + kfree(desc); + } return rv; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 44c9985f352a..3036883fc9f8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct file *file; struct inode *inode; struct address_space *mapping; + struct block_device *claimed_bdev = NULL; int lo_flags = 0; int error; loff_t size; @@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, * here to avoid changing device under exclusive owner. 
*/ if (!(mode & FMODE_EXCL)) { - bdgrab(bdev); - error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd); - if (error) + claimed_bdev = bd_start_claiming(bdev, loop_set_fd); + if (IS_ERR(claimed_bdev)) { + error = PTR_ERR(claimed_bdev); goto out_putf; + } } error = mutex_lock_killable(&loop_ctl_mutex); @@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, mutex_unlock(&loop_ctl_mutex); if (partscan) loop_reread_partitions(lo, bdev); - if (!(mode & FMODE_EXCL)) - blkdev_put(bdev, mode | FMODE_EXCL); + if (claimed_bdev) + bd_abort_claiming(bdev, claimed_bdev, loop_set_fd); return 0; out_unlock: mutex_unlock(&loop_ctl_mutex); out_bdev: - if (!(mode & FMODE_EXCL)) - blkdev_put(bdev, mode | FMODE_EXCL); + if (claimed_bdev) + bd_abort_claiming(bdev, claimed_bdev, loop_set_fd); out_putf: fput(file); out: diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9bcde2325893..e21d2ded732b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd, struct block_device *bdev) { sock_shutdown(nbd); - kill_bdev(bdev); + __invalidate_device(bdev, true); nbd_bdev_reset(bdev); if (test_and_clear_bit(NBD_HAS_CONFIG_REF, &nbd->config->runtime_flags)) diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index a55be205b91a..dbfe34664633 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu) BT_DBG("hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + ath = kzalloc(sizeof(*ath), GFP_KERNEL); if (!ath) return -ENOMEM; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 8905ad2edde7..ae2624fce913 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu) bt_dev_dbg(hu->hdev, "hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + bcm = kzalloc(sizeof(*bcm), GFP_KERNEL); if (!bcm) return -ENOMEM; diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 207bae5e0d46..31f25153087d 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu) BT_DBG("hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + intel = kzalloc(sizeof(*intel), GFP_KERNEL); if (!intel) return -ENOMEM; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 8950e07889fe..85a30fb9177b 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } +/* Check the underlying device or tty has flow control support */ +bool hci_uart_has_flow_control(struct hci_uart *hu) +{ + /* serdev nodes check if the needed operations are present */ + if (hu->serdev) + return true; + + if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset) + return true; + + return false; +} + /* Flow control or un-flow control the device */ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) { diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index f98e5cc343b2..fbc3f7c3a5c7 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ b/drivers/bluetooth/hci_mrvl.c @@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu) BT_DBG("hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + mrvl = 
kzalloc(sizeof(*mrvl), GFP_KERNEL); if (!mrvl) return -ENOMEM; diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 9a5c9c1f9484..82a0a3691a63 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu) BT_DBG("hu %p qca_open", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); if (!qca) return -ENOMEM; diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index f11af3912ce6..6ab631101019 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu); int hci_uart_init_ready(struct hci_uart *hu); void hci_uart_init_work(struct work_struct *work); void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); +bool hci_uart_has_flow_control(struct hci_uart *hu); void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, unsigned int oper_speed); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 5c39f20378b8..9ac6671bb514 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, unsigned long long m; m = hpets->hp_tick_freq + (dis >> 1); - do_div(m, dis); - return (unsigned long)m; + return div64_ul(m, dis); } static int diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c index 57204335c5f5..285e0b8f9a97 100644 --- a/drivers/char/ipmi/ipmb_dev_int.c +++ b/drivers/char/ipmi/ipmb_dev_int.c @@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count, struct ipmb_dev *ipmb_dev = to_ipmb_dev(file); struct ipmb_request_elem *queue_elem; struct ipmb_msg msg; - ssize_t ret; + ssize_t ret = 0; memset(&msg, 0, sizeof(msg)); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index d47ad10a35fe..4838c6a9f0f2 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -77,6 +77,18 @@ static int tpm_go_idle(struct tpm_chip *chip) return chip->ops->go_idle(chip); } +static void tpm_clk_enable(struct tpm_chip *chip) +{ + if (chip->ops->clk_enable) + chip->ops->clk_enable(chip, true); +} + +static void tpm_clk_disable(struct tpm_chip *chip) +{ + if (chip->ops->clk_enable) + chip->ops->clk_enable(chip, false); +} + /** * tpm_chip_start() - power on the TPM * @chip: a TPM chip to use @@ -89,13 +101,12 @@ int tpm_chip_start(struct tpm_chip *chip) { int ret; - if (chip->ops->clk_enable) - chip->ops->clk_enable(chip, true); + tpm_clk_enable(chip); if (chip->locality == -1) { ret = tpm_request_locality(chip); if (ret) { - chip->ops->clk_enable(chip, false); + tpm_clk_disable(chip); return ret; } } @@ -103,8 +114,7 @@ int tpm_chip_start(struct tpm_chip *chip) ret = tpm_cmd_ready(chip); if (ret) { tpm_relinquish_locality(chip); - if (chip->ops->clk_enable) - chip->ops->clk_enable(chip, false); + tpm_clk_disable(chip); return ret; } @@ -124,8 +134,7 @@ void tpm_chip_stop(struct tpm_chip *chip) { tpm_go_idle(chip); tpm_relinquish_locality(chip); - if (chip->ops->clk_enable) - chip->ops->clk_enable(chip, false); + tpm_clk_disable(chip); } EXPORT_SYMBOL_GPL(tpm_chip_stop); @@ -545,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip) return hwrng_register(&chip->hwrng); } +static int tpm_get_pcr_allocation(struct tpm_chip *chip) +{ + int rc; + + rc = (chip->flags & 
TPM_CHIP_FLAG_TPM2) ? + tpm2_get_pcr_allocation(chip) : + tpm1_get_pcr_allocation(chip); + + if (rc > 0) + return -ENODEV; + + return rc; +} + /* * tpm_chip_register() - create a character device for the TPM chip * @chip: TPM chip to use. @@ -564,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip) if (rc) return rc; rc = tpm_auto_startup(chip); + if (rc) { + tpm_chip_stop(chip); + return rc; + } + + rc = tpm_get_pcr_allocation(chip); tpm_chip_stop(chip); if (rc) return rc; diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index e503ffc3aa39..a7fea3e0ca86 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, const char *desc, size_t min_cap_length); int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max); +int tpm1_get_pcr_allocation(struct tpm_chip *chip); unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm_pm_suspend(struct device *dev); int tpm_pm_resume(struct device *dev); @@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, const char *desc); +ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip); int tpm2_auto_startup(struct tpm_chip *chip); void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c index faacbe1ffa1a..149e953ca369 100644 --- a/drivers/char/tpm/tpm1-cmd.c +++ b/drivers/char/tpm/tpm1-cmd.c @@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip) goto out; } - chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), - GFP_KERNEL); - if (!chip->allocated_banks) { - rc = -ENOMEM; - goto out; - } - - chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; - chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; - chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; - chip->nr_allocated_banks = 1; - return rc; out: if (rc > 0) @@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) return rc; } +/** + * tpm1_get_pcr_allocation() - initialize the allocated bank + * @chip: TPM chip to use. + * + * The function initializes the SHA1 allocated bank to extend PCR + * + * Return: + * * 0 on success, + * * < 0 on error. 
+ */ +int tpm1_get_pcr_allocation(struct tpm_chip *chip) +{ + chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), + GFP_KERNEL); + if (!chip->allocated_banks) + return -ENOMEM; + + chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; + chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; + chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; + chip->nr_allocated_banks = 1; + + return 0; +} diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index d103545e4055..ba9acae83bff 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -840,7 +840,7 @@ struct tpm2_pcr_selection { u8 pcr_select[3]; } __packed; -static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) +ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) { struct tpm2_pcr_selection pcr_selection; struct tpm_buf buf; @@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip) goto out; } - rc = tpm2_get_pcr_allocation(chip); - if (rc) - goto out; - rc = tpm2_get_cc_attrs_tbl(chip); out: diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index 44db83a6d01c..44a46dcc0518 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw, continue; div = DIV_ROUND_CLOSEST(parent_rate, req->rate); + if (div > GENERATED_MAX_DIV + 1) + div = GENERATED_MAX_DIV + 1; clk_generated_best_diff(req, parent, parent_rate, div, &best_diff, &best_rate); diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 1aa5f4059251..73b7e238eee7 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = { FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000), }; +static const struct mtk_fixed_factor top_early_divs[] = { + FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2), +}; + static const struct mtk_fixed_factor top_divs[] = { - FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, - 2), FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1, 2), FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1, @@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev) return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); } +static struct clk_onecell_data *top_clk_data; + +static void clk_mt8183_top_init_early(struct device_node *node) +{ + int i; + + top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + + for (i = 0; i < CLK_TOP_NR_CLK; i++) + top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); + + mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), + top_clk_data); + + of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); +} + +CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen", + clk_mt8183_top_init_early); + static int clk_mt8183_top_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); void __iomem *base; - struct clk_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); - clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); - mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), - clk_data); + top_clk_data); + + mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), + top_clk_data); - mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + 
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data); mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), - node, &mt8183_clk_lock, clk_data); + node, &mt8183_clk_lock, top_clk_data); mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes), - base, &mt8183_clk_lock, clk_data); + base, &mt8183_clk_lock, top_clk_data); mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), - base, &mt8183_clk_lock, clk_data); + base, &mt8183_clk_lock, top_clk_data); mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), - clk_data); + top_clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); } static int clk_mt8183_infra_probe(struct platform_device *pdev) diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 52bbb9ce3807..d4075b130674 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev, unsigned int reg = id / 32; unsigned int bit = id % 32; u32 bitmask = BIT(bit); - unsigned long flags; - u32 value; dev_dbg(priv->dev, "reset %u%02u\n", reg, bit); /* Reset module */ - spin_lock_irqsave(&priv->rmw_lock, flags); - value = readl(priv->base + SRCR(reg)); - value |= bitmask; - writel(value, priv->base + SRCR(reg)); - spin_unlock_irqrestore(&priv->rmw_lock, flags); + writel(bitmask, priv->base + SRCR(reg)); /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */ udelay(35); @@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id) unsigned int reg = id / 32; unsigned int bit = id % 32; u32 bitmask = BIT(bit); - unsigned long flags; - u32 value; dev_dbg(priv->dev, "assert %u%02u\n", reg, bit); - spin_lock_irqsave(&priv->rmw_lock, flags); - value = readl(priv->base + SRCR(reg)); - value |= bitmask; - writel(value, priv->base + SRCR(reg)); - spin_unlock_irqrestore(&priv->rmw_lock, flags); + writel(bitmask, priv->base + SRCR(reg)); return 0; } diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig index 91d3d721c801..3c219af25100 100644 --- a/drivers/clk/sprd/Kconfig +++ b/drivers/clk/sprd/Kconfig @@ -3,6 +3,7 @@ config SPRD_COMMON_CLK tristate "Clock support for Spreadtrum SoCs" depends on ARCH_SPRD || COMPILE_TEST default ARCH_SPRD + select REGMAP_MMIO if SPRD_COMMON_CLK diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index 23553ed6b548..2d22d6bf52f2 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -248,16 +248,12 @@ static int __maybe_unused cn_proc_show(struct seq_file *m, void *v) return 0; } -static struct cn_dev cdev = { - .input = cn_rx_skb, -}; - static int cn_init(void) { struct cn_dev *dev = &cdev; struct netlink_kernel_cfg cfg = { .groups = CN_NETLINK_USERS + 0xf, - .input = dev->input, + .input = cn_rx_skb, }; dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg); diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 93f39a1d4c3d..c66f566a854c 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) int err = -ENODEV; cpu = of_get_cpu_node(policy->cpu, NULL); + if (!cpu) + goto out; + max_freqp = of_get_property(cpu, "clock-frequency", NULL); of_node_put(cpu); - if (!cpu) + if (!max_freqp) { + err = 
-EINVAL; goto out; + } + + /* we need the freq in kHz */ + max_freq = *max_freqp / 1000; dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); if (!dn) @@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) } pr_debug("init cpufreq on CPU %d\n", policy->cpu); - - max_freqp = of_get_property(cpu, "clock-frequency", NULL); - if (!max_freqp) { - err = -EINVAL; - goto out_unmap_sdcpwr; - } - - /* we need the freq in kHz */ - max_freq = *max_freqp / 1000; - pr_debug("max clock-frequency is at %u kHz\n", max_freq); pr_debug("initializing frequency table\n"); @@ -199,9 +197,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); return 0; -out_unmap_sdcpwr: - iounmap(sdcpwr_mapbase); - out_unmap_sdcasr: iounmap(sdcasr_mapbase); out: diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 3dc1cbf849db..b785e936244f 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation) device->bc_implemented = BC_IMPLEMENTED; break; } - /* else fall through to case address error */ + /* else, fall through - to case address error */ case RCODE_ADDRESS_ERROR: device->bc_implemented = BC_UNIMPLEMENTED; } diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 42566b7be8f5..df8a56a979b9 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -284,7 +284,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, if ((data[0] & bit) == (data[1] & bit)) continue; - /* 1394-1995 IRM, fall through to retry. */ + /* fall through - It's a 1394-1995 IRM, retry. */ default: if (retry) { retry--; diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 46bd22dde535..94a13fca8267 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -54,6 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) switch (port_type) { case SELFID_PORT_CHILD: (*child_port_count)++; + /* fall through */ case SELFID_PORT_PARENT: case SELFID_PORT_NCONN: (*total_port_count)++; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 53446e39a32c..ba8d3d0ef32c 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK config ISCSI_IBFT_FIND bool "iSCSI Boot Firmware Table Attributes" - depends on X86 && ACPI + depends on X86 && ISCSI_IBFT default n help This option enables the kernel to find the region of memory @@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND config ISCSI_IBFT tristate "iSCSI Boot Firmware Table Attributes module" select ISCSI_BOOT_SYSFS - depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL + select ISCSI_IBFT_FIND if X86 + depends on ACPI && SCSI && SCSI_LOWLEVEL default n help This option enables support for detection and exposing of iSCSI diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index ab3aa3983833..7e12cbdf957c 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information"); MODULE_LICENSE("GPL"); MODULE_VERSION(IBFT_ISCSI_VERSION); +#ifndef CONFIG_ISCSI_IBFT_FIND +struct acpi_table_ibft *ibft_addr; +#endif + struct ibft_hdr { u8 id; u8 version; diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 
474f304ec109..cdd4f73b4869 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -40,6 +40,7 @@ config ALTERA_PR_IP_CORE_PLAT config FPGA_MGR_ALTERA_PS_SPI tristate "Altera FPGA Passive Serial over SPI" depends on SPI + select BITREVERSE help FPGA manager driver support for Altera Arria/Cyclone/Stratix using the passive serial interface over SPI. diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 3ee99d070608..f497003f119c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) - irqflags |= IRQF_TRIGGER_RISING; + irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? + IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) - irqflags |= IRQF_TRIGGER_FALLING; + irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? + IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; irqflags |= IRQF_ONESHOT; INIT_KFIFO(le->events); @@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, for (i = 0; i < chip->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; - if (chip->get_direction && gpiochip_line_is_valid(chip, i)) - desc->flags = !chip->get_direction(chip, i) ? - (1 << FLAG_IS_OUT) : 0; - else - desc->flags = !chip->direction_input ? - (1 << FLAG_IS_OUT) : 0; + if (chip->get_direction && gpiochip_line_is_valid(chip, i)) { + if (!chip->get_direction(chip, i)) + set_bit(FLAG_IS_OUT, &desc->flags); + else + clear_bit(FLAG_IS_OUT, &desc->flags); + } else { + if (!chip->direction_input) + set_bit(FLAG_IS_OUT, &desc->flags); + else + clear_bit(FLAG_IS_OUT, &desc->flags); + } } acpi_gpiochip_add(chip); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e6f40fb54c9a..e67c194c2aca 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -398,7 +398,7 @@ config DRM_R128 config DRM_I810 tristate "Intel I810" # !PREEMPT because of missing ioctl locking - depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN) + depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN) help Choose this option if you have an Intel I810 graphics card. If M is selected, the module will be called i810. AGP support is required diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index fe062b76ec91..9410ffceee15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( adev->asic_type != CHIP_FIJI && adev->asic_type != CHIP_POLARIS10 && adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12) ? + adev->asic_type != CHIP_POLARIS12 && + adev->asic_type != CHIP_VEGAM) ? 
VI_BO_SIZE_ALIGN : 1; mapping_flags = AMDGPU_VM_PAGE_READABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e748cd284780..9ccf32c5456a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return r; } - fence = amdgpu_ctx_get_fence(ctx, entity, - deps[i].handle); + fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); + amdgpu_ctx_put(ctx); + + if (IS_ERR(fence)) + return PTR_ERR(fence); + else if (!fence) + continue; if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { - struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); + struct drm_sched_fence *s_fence; struct dma_fence *old = fence; + s_fence = to_drm_sched_fence(fence); fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(old); } - if (IS_ERR(fence)) { - r = PTR_ERR(fence); - amdgpu_ctx_put(ctx); + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + dma_fence_put(fence); + if (r) return r; - } else if (fence) { - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, - true); - dma_fence_put(fence); - amdgpu_ctx_put(ctx); - if (r) - return r; - } } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6d54decef7f8..5652cc72ed3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, thread = (*pos & GENMASK_ULL(59, 52)) >> 52; bank = (*pos & GENMASK_ULL(61, 60)) >> 60; - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 98df55534a6d..6d24b422e0ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; int amdgpu_ras_enable = -1; -uint amdgpu_ras_mask = 0xffffffff; +uint amdgpu_ras_mask = 0xfffffffb; /** * DOC: vramlimit (int) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8b7efd0a7028..2b546567853b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); - else if (adev->powerplay.pp_funcs->get_current_power_state) + if (is_support_sw_smu(adev)) { + if (adev->smu.ppt_funcs->get_current_power_state) + pm = amdgpu_smu_get_current_power_state(adev); + else + pm = adev->pm.dpm.user_state; + } else if (adev->powerplay.pp_funcs->get_current_power_state) { pm = amdgpu_dpm_get_current_power_state(adev); - else + } else { pm = adev->pm.dpm.user_state; + } return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? 
"battery" : @@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, goto fail; } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + } else if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); } else { mutex_lock(&adev->pm.mutex); @@ -1734,7 +1742,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &speed); + err = smu_get_fan_speed_rpm(&adev->smu, &speed); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { @@ -1794,7 +1802,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &rpm); + err = smu_get_fan_speed_rpm(&adev->smu, &rpm); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { @@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); - /* UVD clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "UVD: Disabled\n"); - } else { - seq_printf(m, "UVD: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + if (adev->asic_type > CHIP_VEGA20) { + /* VCN clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCN: Disabled\n"); + } else { + seq_printf(m, "VCN: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } } - } - seq_printf(m, "\n"); + seq_printf(m, "\n"); + } else { + /* UVD clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); - /* VCE clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCE: Disabled\n"); - } else { - seq_printf(m, "VCE: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a4412e47810..fac7aa2c244f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, static int amdgpu_ras_release_vram(struct amdgpu_device *adev, struct amdgpu_bo **bo_ptr); -static void amdgpu_ras_self_test(struct amdgpu_device *adev) -{ - /* TODO */ -} - static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -689,6 +684,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + if (block_info.block_id != TA_RAS_BLOCK__UMC) { + DRM_INFO("%s error injection is not supported yet\n", + ras_block_str(info->head.block)); + return -EINVAL; + } + ret = psp_ras_trigger_error(&adev->psp, &block_info); if (ret) DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", @@ -1557,6 +1558,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_check_supported(adev, &con->hw_supported, &con->supported); + if (!con->hw_supported) { + amdgpu_ras_set_context(adev, NULL); + kfree(con); + return 0; + } + con->features = 0; INIT_LIST_HEAD(&con->head); /* Might need get this flag from vbios. */ @@ -1570,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (amdgpu_ras_fs_init(adev)) goto fs_out; - amdgpu_ras_self_test(adev); - DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 1675d5837c3c..32773b7523d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) } nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) @@ -4611,6 +4620,7 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, 0); WREG32(cp_int_cntl_reg, cp_int_cntl); + break; case AMDGPU_IRQ_STATE_ENABLE: cp_int_cntl = RREG32(cp_int_cntl_reg); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0db9f488da7e..21187275dfd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. 
*/ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v7_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5f401b41ef7c..751567f78567 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3706,6 +3706,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v8_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f4c4eea62526..1cf639a51178 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1918,6 +1918,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) } soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v9_0_constants_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 988c0adaca91..1cfc2620b2dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.gpu_addr)); offset = size; - /* No signed header for now from firmware WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); } WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 792371442195..4e3fc284f6ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -668,6 +668,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, case CHIP_RAVEN: pcache_info = raven_cache_info; num_of_cache_types = ARRAY_SIZE(raven_cache_info); + break; case CHIP_NAVI10: pcache_info = navi10_cache_info; num_of_cache_types = ARRAY_SIZE(navi10_cache_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 4f8a6ffc5775..9cd3eb2d90bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -429,7 +429,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - pr_debug("%s@%i\n", __func__, 
__LINE__); case KFD_MQD_TYPE_COMPUTE: pr_debug("%s@%i\n", __func__, __LINE__); mqd->allocate_mqd = allocate_mqd; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index c1a92c16535c..5cc3acccda2a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -262,12 +262,12 @@ void dce110_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce110_max_clks_by_state, sizeof(dce110_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 778392c73187..7c746ef1e32e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -226,12 +226,12 @@ void dce112_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce112_max_clks_by_state, sizeof(dce112_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c index 906310c3e2eb..5399b8cf6b75 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c @@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = { void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce120_max_clks_by_state, sizeof(dce120_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->base.dprefclk_khz = 600000; clk_mgr->base.funcs = &dce120_funcs; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 08a774fc7b67..50bfb5921de0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, void dcn2_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.p_state_change_support = true; } void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) @@ -331,6 +333,7 @@ void dcn20_clk_mgr_construct( struct dccg *dccg) { clk_mgr->base.ctx = ctx; + clk_mgr->pp_smu = pp_smu; clk_mgr->base.funcs = &dcn2_funcs; clk_mgr->regs = &clk_mgr_regs; clk_mgr->clk_mgr_shift = &clk_mgr_shift; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4ef4dc63e221..fa20201eef3a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -502,8 +502,10 @@ void dc_stream_set_static_screen_events(struct dc *dc, static void destruct(struct dc *dc) { - 
dc_release_state(dc->current_state); - dc->current_state = NULL; + if (dc->current_state) { + dc_release_state(dc->current_state); + dc->current_state = NULL; + } destroy_links(dc); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 8dbf759eba45..355b4ba12796 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; + union max_down_spread max_down_spread = { {0} }; // Read DPCD 00101h to find out the number of lanes currently set for (i = 0; i < read_dpcd_retry_cnt; i++) { @@ -553,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) msleep(8); } - ASSERT(status == DC_OK); - // Read DPCD 00100h to find if standard link rates are set core_link_read_dpcd(link, DP_LINK_BW_SET, &link_bw_set, sizeof(link_bw_set)); @@ -576,6 +575,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) link->cur_link_settings.link_rate = link_bw_set; link->cur_link_settings.use_link_rate_set = false; } + // Read DPCD 00003h to find the max down spread. + core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, + &max_down_spread.raw, sizeof(max_down_spread)); + link->cur_link_settings.link_spread = + max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } static bool detect_dp( @@ -717,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return false; } - if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* On detect, we want to make sure current link settings are - * up to date, especially if link was powered on by GOP. - */ - read_edp_current_link_settings_on_detect(link); - } - prev_sink = link->local_sink; if (prev_sink != NULL) { dc_sink_retain(prev_sink); @@ -765,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { + read_edp_current_link_settings_on_detect(link); detect_edp_sink_caps(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; @@ -2329,7 +2328,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { if (core_dc->current_state->res_ctx. 
pipe_ctx[i].stream->link - == link) + == link) { /* DMCU -1 for all controller id values, * therefore +1 here */ @@ -2337,6 +2336,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, core_dc->current_state-> res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + + /* Disable brightness ramping when the display is blanked + * as it can hang the DMCU + */ + if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) + frame_ramp = 0; + } } } abm->funcs->set_backlight_level_pwm( @@ -2984,8 +2990,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc, /* Retrain with preferred link settings only relevant for * DP signal type + * Check for non-DP signal or if passive dongle present */ - if (!dc_is_dp_signal(link->connector_signal)) + if (!dc_is_dp_signal(link->connector_signal) || + link->dongle_max_pix_clk > 0) return; for (i = 0; i < MAX_PIPES; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 056be4c34a98..2c7aaed907b9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2230,18 +2230,25 @@ static void get_active_converter_info( link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + link->dpcd_caps.is_branch_dev = false; return; } /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ - link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) { + link->dpcd_caps.is_branch_dev = false; + } + + else { + link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + } switch (ds_port.fields.PORT_TYPE) { case DOWNSTREAM_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; break; - case DOWNSTREAM_DVI_HDMI: - /* At this point we don't know is it DVI or HDMI, + case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: + /* At this point we don't know is it DVI or HDMI or DP++, * assume DVI.*/ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; break; @@ -2258,6 +2265,10 @@ static void get_active_converter_info( det_caps, sizeof(det_caps)); switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { + /*Handle DP case as DONGLE_NONE*/ + case DOWN_STREAM_DETAILED_DP: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; case DOWN_STREAM_DETAILED_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; @@ -2267,6 +2278,8 @@ static void get_active_converter_info( DISPLAY_DONGLE_DP_DVI_CONVERTER; break; case DOWN_STREAM_DETAILED_HDMI: + case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: + /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_HDMI_CONVERTER; @@ -2282,14 +2295,18 @@ static void get_active_converter_info( link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = - hdmi_caps.bits.YCrCr422_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = - hdmi_caps.bits.YCrCr420_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = - hdmi_caps.bits.YCrCr422_CONVERSION; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = - hdmi_caps.bits.YCrCr420_CONVERSION; + /*YCBCR capability only for HDMI case*/ + if (port_caps->bits.DWN_STRM_PORTX_TYPE + == DOWN_STREAM_DETAILED_HDMI) { + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = + hdmi_caps.bits.YCrCr422_PASS_THROUGH; + 
link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = + hdmi_caps.bits.YCrCr420_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = + hdmi_caps.bits.YCrCr422_CONVERSION; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = + hdmi_caps.bits.YCrCr420_CONVERSION; + } link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = translate_dpcd_max_bpc( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 51a78283a86d..2ceaab4fb5de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -258,7 +258,7 @@ bool resource_construct( * PORT_CONNECTIVITY == 1 (as instructed by HW team). */ update_num_audio(&straps, &num_audio, &pool->audio_support); - for (i = 0; i < pool->pipe_count && i < num_audio; i++) { + for (i = 0; i < caps->num_audio; i++) { struct audio *aud = create_funcs->create_audio(ctx, i); if (aud == NULL) { @@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio( return pool->audios[i]; } } + + /* use engine id to find free audio */ + if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) { + return pool->audios[id]; + } + /*not found the matching one, first come first serve*/ for (i = 0; i < pool->audio_count; i++) { if (res_ctx->is_audio_acquired[i] == false) { @@ -1833,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) pix_clk /= 2; if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (timing->display_color_depth) { + case COLOR_DEPTH_666: case COLOR_DEPTH_888: normalized_pix_clk = pix_clk; break; @@ -1979,7 +1986,7 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count) { + stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index af7f8be230f7..352862370390 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; - if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { + if (pipe_ctx->stream_res.stream_enc && + pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { if (pipe_ctx->stream->dmdata_address.quad_part != 0) { /* if using dynamic meta, don't set up generic infopackets */ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index f8903bcabe49..58bd131d5b48 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -239,6 +239,10 @@ static void dmcu_set_backlight_level( s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); REG_WRITE(BIOS_SCRATCH_2, s2); + + /* waitDMCUReadyForCmd */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, + 0, 1, 80000); } static void dce_abm_init(struct abm *abm) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 858a58856ebd..fafb4b470140 
100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -965,11 +965,17 @@ void hwss_edp_backlight_control( void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *core_dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = core_dc->clk_mgr; + struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; + if (!pipe_ctx->stream) + return; + + core_dc = pipe_ctx->stream->ctx->dc; + clk_mgr = core_dc->clk_mgr; + if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; @@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) { - struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc *dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = dc->clk_mgr; + struct clk_mgr *clk_mgr; + + if (!pipe_ctx || !pipe_ctx->stream) + return; + + dc = pipe_ctx->stream->ctx->dc; + clk_mgr = dc->clk_mgr; if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; @@ -1009,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); if (pipe_ctx->stream_res.audio) { + pipe_ctx->stream_res.audio->enabled = false; + if (dc->res_pool->pp_smu) pp_smu = dc->res_pool->pp_smu; @@ -1039,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) /* dal_audio_disable_azalia_audio_jack_presence(stream->audio, * stream->stream_engine_id); */ - if (pipe_ctx->stream_res.audio) - pipe_ctx->stream_res.audio->enabled = false; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index e50a696fcb5d..2118ea21d7e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc) * everything down. 
*/ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct hubp *hubp = dc->res_pool->hubps[i]; - struct dpp *dpp = dc->res_pool->dpps[i]; - - hubp->funcs->hubp_init(hubp); - dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; - plane_atomic_power_down(dc, dpp, hubp); - } - - apply_DEGVIDCN10_253_wa(dc); + dc->hwss.init_pipes(dc, dc->current_state); } for (i = 0; i < dc->res_pool->audio_count; i++) { @@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, return result; } - - - - static bool dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) @@ -2516,6 +2503,12 @@ static void dcn10_apply_ctx_for_surface( if (removed_pipe[i]) dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (removed_pipe[i]) { + dc->hwss.optimize_bandwidth(dc, context); + break; + } + if (dc->hwseq->wa.DEGVIDCN10_254) hubbub1_wm_change_req_wa(dc->res_pool->hubbub); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1a20461c2937..a12530a3ab9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = { .num_audio = 3, .num_stream_encoder = 3, .num_pll = 3, - .num_ddc = 3, + .num_ddc = 4, }; static const struct dc_plane_cap plane_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 51a3dfe97f0e..31aa6ee5cd5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -102,14 +102,19 @@ void dccg2_init(struct dccg *dccg) switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { case 6: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); + /* Fall through */ case 5: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); + /* Fall through */ case 4: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); + /* Fall through */ case 3: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); + /* Fall through */ case 2: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); + /* Fall through */ case 1: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index ece6e136437b..6e2dbd03f9bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne break; default: ASSERT(false); + block_size = page_table_block_size; break; } @@ -366,25 +367,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, 
pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.depth = 1; - phys_config.block_size = 4096; phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; - + phys_config.depth = 0; + phys_config.block_size = 0; // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0b84a322b8a2..d810c8940129 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1153,8 +1153,8 @@ void dcn20_enable_plane( apt.sys_default.quad_part = 0; - apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr; - apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr; + apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr; + apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr; // Program system aperture settings pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); @@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global( CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, + CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->lock_doublebuffer_disable( pipe->stream_res.tg); } @@ -1263,6 +1265,17 @@ void dcn20_pipe_control_lock( if (pipe->plane_state != NULL) flip_immediate = pipe->plane_state->flip_immediate; + if (flip_immediate && lock) { + while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) { + udelay(1); + } + + if (pipe->bottom_pipe != NULL) + while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) { + udelay(1); + } + } + /* In flip immediate and pipe splitting case, we need to use GSL * for synchronization. Only do setup on locking and on flip type change. */ @@ -1740,8 +1753,11 @@ static void dcn20_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) { dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); } - } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + else if (pipe_ctx->stream_res.dsc) + dp_set_dsc_enable(pipe_ctx, false); +#endif /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 26a66ccf6e72..1ae973962d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1) optc1->min_h_blank = 32; optc1->min_v_blank = 3; optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 8; + optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels asked By HW in the first place for no actual reason. 
Oculus Rift S will not light up with 8 as it's hsyncWidth is 6. Changing it to 4 to fix that issue. optc1->min_v_sync_width = 1; optc1->comb_opp_id = 0xf; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d200bc3cec71..b949e202d6cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. + min_dcfclk = 507; for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c index 27679ef6ebe8..96c263223315 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c @@ -23,6 +23,8 @@ * */ +#include <linux/delay.h> + #include "dcn20_vmid.h" #include "reg_helper.h" @@ -36,6 +38,38 @@ #define FN(reg_name, field_name) \ vmid->shifts->field_name, vmid->masks->field_name +static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid) +{ + /* According the hardware spec, we need to poll for the lowest + * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM + * context is updated. We can't use REG_WAIT here since we + * don't have a seperate field to wait on. + * + * TODO: Confirm timeout / poll interval with hardware team + */ + + int max_times = 10000; + int delay_us = 5; + int i; + + for (i = 0; i < max_times; ++i) { + uint32_t entry_lo32; + + REG_GET(PAGE_TABLE_BASE_ADDR_LO32, + VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, + &entry_lo32); + + if (entry_lo32 & 0x1) + return; + + udelay(delay_us); + } + + /* VM setup timed out */ + DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n"); + ASSERT(0); +} + void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config) { REG_SET(PAGE_TABLE_START_ADDR_HI32, 0, @@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_ REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF); + /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */ REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF); + + dcn20_wait_for_vmid_ready(vmid); } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c index 67089765780b..340ef4d41ebd 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c @@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; + /* As per DSC spec v1.2a recommendation: */ + if (vdsc_cfg->native_420) + vdsc_cfg->second_line_offset_adj = 512; + else + vdsc_cfg->second_line_offset_adj = 0; + return 0; } EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 
c89393c19232..a148ffde8b12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -212,7 +212,7 @@ struct resource_pool { struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; unsigned int clk_src_count; - struct audio *audios[MAX_PIPES]; + struct audio *audios[MAX_AUDIOS]; unsigned int audio_count; struct audio_support audio_support; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 959f5b654611..9502478c4a1b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth { }; enum dcn_hubbub_page_table_block_size { - DCN_PAGE_TABLE_BLOCK_SIZE_4KB, - DCN_PAGE_TABLE_BLOCK_SIZE_64KB + DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0, + DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4, }; struct dcn_hubbub_phys_addr_config { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 8759ec03aede..f82365e2d03c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -34,6 +34,7 @@ * Data types shared between different Virtual HW blocks ******************************************************************************/ +#define MAX_AUDIOS 7 #define MAX_PIPES 6 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB_PIPES 1 diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index 1c66166d0a94..2c90d1b46c8b 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -43,7 +43,7 @@ enum dpcd_revision { enum dpcd_downstream_port_type { DOWNSTREAM_DP = 0, DOWNSTREAM_VGA, - DOWNSTREAM_DVI_HDMI, + DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS,/* DVI, HDMI, DP++ */ DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */ }; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f661bf96ed0..5b1ebb7f995a 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -123,6 +123,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, AMDGPU_PP_SENSOR_MIN_FAN_RPM, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + AMDGPU_PP_SENSOR_VCN_POWER_STATE, }; enum amd_pp_task { diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index f1565c448de5..0685a3388e38 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, { int ret = 0, clk_id = 0; uint32_t param = 0; + uint32_t clock_limit; if (!min && !max) return -EINVAL; - if (!smu_clk_dpm_is_enabled(smu, clk_type)) + if (!smu_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + clock_limit = smu->smu_table.boot_values.uclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + clock_limit = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + clock_limit = smu->smu_table.boot_values.socclk; + break; + default: + clock_limit = 0; + break; + } + + /* clock in Mhz unit */ + if (min) + *min = clock_limit / 100; + if (max) + *max = clock_limit / 100; + return 0; + } mutex_lock(&smu->mutex); clk_id = smu_clk_get_index(smu, clk_type); @@ -281,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu, 
/* not support power state */ memset(state_info, 0, sizeof(struct pp_states_info)); - state_info->nums = 0; + state_info->nums = 1; + state_info->states[0] = POWER_STATE_TYPE_DEFAULT; return 0; } @@ -312,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *size = 4; + break; default: ret = -EINVAL; break; @@ -698,6 +728,12 @@ static int smu_sw_init(void *handle) return ret; } + ret = smu_register_irq_handler(smu); + if (ret) { + pr_err("Failed to register smc irq handler!\n"); + return ret; + } + return 0; } @@ -707,6 +743,9 @@ static int smu_sw_fini(void *handle) struct smu_context *smu = &adev->smu; int ret; + kfree(smu->irq_source); + smu->irq_source = NULL; + ret = smu_smc_table_sw_fini(smu); if (ret) { pr_err("Failed to sw fini smc table!\n"); @@ -1063,10 +1102,6 @@ static int smu_hw_init(void *handle) if (ret) goto failed; - ret = smu_register_irq_handler(smu); - if (ret) - goto failed; - if (!smu->pm_enabled) adev->pm.dpm_enabled = false; else @@ -1096,9 +1131,6 @@ static int smu_hw_fini(void *handle) kfree(table_context->overdrive_table); table_context->overdrive_table = NULL; - kfree(smu->irq_source); - smu->irq_source = NULL; - ret = smu_fini_fb_allocations(smu); if (ret) return ret; @@ -1349,13 +1381,49 @@ static int smu_enable_umd_pstate(void *handle, return 0; } +static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + uint32_t sclk_mask, mclk_mask, soc_mask; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu_force_dpm_limit_value(smu, true); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu_force_dpm_limit_value(smu, false); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = smu_unforce_dpm_levels(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = smu_get_profiling_clk_mask(smu, level, + &sclk_mask, + &mclk_mask, + &soc_mask); + if (ret) + return ret; + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + return ret; +} + int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool skip_display_settings) { int ret = 0; int index = 0; - uint32_t sclk_mask, mclk_mask, soc_mask; long workload; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -1386,39 +1454,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != level) { - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = smu_force_dpm_limit_value(smu, true); - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = smu_force_dpm_limit_value(smu, false); - break; - - case AMD_DPM_FORCED_LEVEL_AUTO: - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - ret = smu_unforce_dpm_levels(smu); - break; - - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - ret = smu_get_profiling_clk_mask(smu, level, - &sclk_mask, - &mclk_mask, - &soc_mask); - if (ret) - 
return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); - break; - - case AMD_DPM_FORCED_LEVEL_MANUAL: - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: - default: - break; + ret = smu_asic_set_performance_level(smu, level); + if (ret) { + ret = smu_default_set_performance_level(smu, level); } - if (!ret) smu_dpm_ctx->dpm_level = level; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index e32ae9d3373c..18e780f566fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); uint32_t sclk, mclk; int ret = 0; @@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_TEMP: *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1; + *size = 4; + break; default: ret = -EINVAL; break; @@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + if (bgate) { amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0); + smu10_data->vcn_power_gated = true; } else { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); + smu10_data->vcn_power_gated = false; } } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 1af992fb0bde..208e6711d506 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -429,7 +429,6 @@ struct smu_table_context struct smu_table *tables; uint32_t table_count; struct smu_table memory_pool; - uint16_t software_shutdown_temp; uint8_t thermal_controller_type; uint16_t TDPODLimit; @@ -613,6 +612,7 @@ struct pptable_funcs { int (*tables_init)(struct smu_context *smu, struct smu_table *tables); int (*set_thermal_fan_table)(struct smu_context *smu); int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); + int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*get_current_clk_freq_by_table)(struct smu_context *smu, @@ -621,6 +621,7 @@ struct pptable_funcs { int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); int (*set_default_od_settings)(struct smu_context *smu, bool initialize); + int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); }; struct smu_funcs @@ -685,7 +686,6 @@ struct smu_funcs int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, struct 
dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*conv_power_profile_to_pplib_workload)(int power_profile); - int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); @@ -751,8 +751,6 @@ struct smu_funcs ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_get_current_rpm(smu, speed) \ - ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0) #define smu_set_fan_speed_rpm(smu, speed) \ ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) #define smu_send_smc_msg(smu, msg) \ @@ -841,6 +839,8 @@ struct smu_funcs ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) #define smu_set_fan_speed_percent(smu, speed) \ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) +#define smu_get_fan_speed_rpm(smu, speed) \ + ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) #define smu_msg_get_index(smu, msg) \ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) @@ -918,6 +918,9 @@ struct smu_funcs ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) #define smu_baco_reset(smu) \ ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) +#define smu_asic_set_performance_level(smu, level) \ + ((smu)->ppt_funcs->set_performance_level? 
(smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); + extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 2dae0ae0829e..cc0a3b2256af 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -23,6 +23,7 @@ #include "pp_debug.h" #include <linux/firmware.h> +#include <linux/pci.h> #include "amdgpu.h" #include "amdgpu_smu.h" #include "atomfirmware.h" @@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { int ret = 0; - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; - if (enable && power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; - } - power_gate->uvd_gated = false; + if (enable) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; } else { - if (!enable && !power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; - } - power_gate->uvd_gated = true; - } + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; } - return 0; + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); + + return ret; } static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, @@ -626,11 +619,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, return ret; } +static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + DpmDescriptor_t *dpm_desc = NULL; + uint32_t clk_index = 0; + + clk_index = smu_clk_get_index(smu, clk_type); + dpm_desc = &pptable->DpmDescriptor[clk_index]; + + /* 0 - Fine grained DPM, 1 - Discrete DPM */ + return dpm_desc->SnapToDiscrete == 0 ? true : false; +} + static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t freq_values[3] = {0}; + uint32_t mark_index = 0; switch (clk_type) { case SMU_GFXCLK: @@ -643,22 +651,42 @@ static int navi10_print_clk_levels(struct smu_context *smu, ret = smu_get_current_clk_freq(smu, clk_type, &cur_value); if (ret) return size; + /* 10KHz -> MHz */ cur_value = cur_value / 100; - size += sprintf(buf, "current clk: %uMhz\n", cur_value); - ret = smu_get_dpm_level_count(smu, clk_type, &count); if (ret) return size; - for (i = 0; i < count; i++) { - ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + for (i = 0; i < count; i++) { + ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (ret) + return size; + + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? "*" : ""); + } + } else { + ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); + if (ret) + return size; + ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) return size; - size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, - cur_value == value ? 
"*" : ""); + freq_values[1] = cur_value; + mark_index = cur_value == freq_values[0] ? 0 : + cur_value == freq_values[2] ? 2 : 1; + if (mark_index != 1) + freq_values[1] = (freq_values[0] + freq_values[2]) / 2; + + for (i = 0; i < 3; i++) { + size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i], + i == mark_index ? "*" : ""); + } + } break; default: @@ -919,12 +947,13 @@ static bool navi10_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) +static int navi10_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) { SmuMetrics_t metrics; int ret = 0; - if (!value) + if (!speed) return -EINVAL; memset(&metrics, 0, sizeof(metrics)); @@ -934,7 +963,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) if (ret) return ret; - *value = metrics.CurrFanSpeed; + *speed = metrics.CurrFanSpeed; return ret; } @@ -944,10 +973,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu, { int ret = 0; uint32_t percent = 0; - uint16_t current_rpm; + uint32_t current_rpm; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = navi10_get_fan_speed(smu, ¤t_rpm); + ret = navi10_get_fan_speed_rpm(smu, ¤t_rpm); if (ret) return ret; @@ -1530,6 +1559,76 @@ static int navi10_set_ppfeature_status(struct smu_context *smu, return 0; } +static int navi10_set_peak_clock_by_device(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + uint32_t sclk_freq = 0, uclk_freq = 0; + uint32_t uclk_level = 0; + + switch (adev->pdev->revision) { + case 0xf0: /* XTX */ + case 0xc0: + sclk_freq = NAVI10_PEAK_SCLK_XTX; + break; + case 0xf1: /* XT */ + case 0xc1: + sclk_freq = NAVI10_PEAK_SCLK_XT; + break; + default: /* XL */ + sclk_freq = NAVI10_PEAK_SCLK_XL; + break; + } + + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); + if (ret) + return ret; + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + if (ret) + return ret; + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + if (ret) + return ret; + + return ret; +} + +static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = navi10_set_peak_clock_by_device(smu); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int navi10_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; + + if (!range || !powerplay_table) + return -EINVAL; + + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->software_shutdown_temp; + + return 0; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1557,6 +1656,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .unforce_dpm_levels = navi10_unforce_dpm_levels, .is_dpm_running = navi10_is_dpm_running, .get_fan_speed_percent = navi10_get_fan_speed_percent, + .get_fan_speed_rpm = navi10_get_fan_speed_rpm, .get_power_profile_mode = navi10_get_power_profile_mode, .set_power_profile_mode = navi10_set_power_profile_mode, .get_profiling_clk_mask = 
navi10_get_profiling_clk_mask, @@ -1565,6 +1665,8 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_uclk_dpm_states = navi10_get_uclk_dpm_states, .get_ppfeature_status = navi10_get_ppfeature_status, .set_ppfeature_status = navi10_set_ppfeature_status, + .set_performance_level = navi10_set_performance_level, + .get_thermal_temperature_range = navi10_get_thermal_temperature_range, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index 957288e22f47..620ff17c2fef 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -23,6 +23,10 @@ #ifndef __NAVI10_PPT_H__ #define __NAVI10_PPT_H__ +#define NAVI10_PEAK_SCLK_XTX (1830) +#define NAVI10_PEAK_SCLK_XT (1755) +#define NAVI10_PEAK_SCLK_XL (1625) + extern void navi10_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 95c7c4dae523..ac5b26228e75 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, struct smu_temperature_range *range) { struct amdgpu_device *adev = smu->adev; - int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = SMU_THERMAL_MINIMUM_ALERT_TEMP; + int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; if (!range) @@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, if (high > range->max) high = range->max; + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max); + if (low > high) return -EINVAL; @@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); @@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) if (!smu->pm_enabled) return ret; + ret = smu_get_thermal_temperature_range(smu, &range); + if (ret) + return ret; if (smu->smu_table.thermal_controller_type) { ret = smu_v11_0_set_thermal_range(smu, &range); @@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } - adev->pm.dpm.thermal.min_temp = range.min; - adev->pm.dpm.thermal.max_temp = range.max; - adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max; - adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min; - adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max; - adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max; - adev->pm.dpm.thermal.min_mem_temp = range.mem_min; - 
adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; - adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; return ret; } @@ -1371,23 +1377,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static int smu_v11_0_get_current_rpm(struct smu_context *smu, - uint32_t *current_rpm) -{ - int ret; - - ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); - - if (ret) { - pr_err("Attempt to get current RPM from SMC Failed!\n"); - return ret; - } - - smu_read_smc_arg(smu, current_rpm); - - return 0; -} - static uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { @@ -1773,7 +1762,6 @@ static const struct smu_funcs smu_v11_0_funcs = { .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_current_rpm = smu_v11_0_get_current_rpm, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index bb9bb09cfc7a..dd6fd1c8bf24 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, sizeof(PPTable_t)); - table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); @@ -3015,6 +3014,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) return ret; } +static int vega20_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + int ret; + + ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); + + if (ret) { + pr_err("Attempt to get current RPM from SMC Failed!\n"); + return ret; + } + + smu_read_smc_arg(smu, speed); + + return 0; +} + static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) { @@ -3022,7 +3038,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t current_rpm = 0, percent = 
0; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = smu_get_current_rpm(smu, &current_rpm); + ret = vega20_get_fan_speed_rpm(smu, &current_rpm); if (ret) return ret; @@ -3217,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu, return 0; } -static const struct smu_temperature_range vega20_thermal_policy[] = -{ - {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, - { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, -}; - static int vega20_get_thermal_temperature_range(struct smu_context *smu, struct smu_temperature_range *range) { - + struct smu_table_context *table_context = &smu->smu_table; + ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table; PPTable_t *pptable = smu->smu_table.driver_pptable; - if (!range) + if (!range || !powerplay_table) return -EINVAL; - memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); - - range->max = pptable->TedgeLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_crit_max = pptable->ThotspotLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_crit_max = pptable->ThbmLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->usSoftwareShutdownTemp; + range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE); + range->hotspot_crit_max = pptable->ThotspotLimit; + range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT); + range->mem_crit_max = pptable->ThbmLimit; + range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM); return 0; @@ -3293,6 +3298,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .is_dpm_running = vega20_is_dpm_running, .set_thermal_fan_table = vega20_set_thermal_fan_table, .get_fan_speed_percent = vega20_get_fan_speed_percent, + .get_fan_speed_rpm = vega20_get_fan_speed_rpm, .set_watermarks_table = vega20_set_watermarks_table, .get_thermal_temperature_range = vega20_get_thermal_temperature_range }; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index a6eec908c43e..1cc9f502c1f2 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC config DRM_LVDS_ENCODER tristate "Transparent parallel to LVDS encoder support" depends on OF + select DRM_KMS_HELPER select DRM_PANEL_BRIDGE help Support for transparent parallel to LVDS encoders that don't require @@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024 config DRM_TOSHIBA_TC358764 tristate "TC358764 DSI/LVDS bridge" - depends on DRM && DRM_PANEL depends on OF select DRM_MIPI_DSI + select DRM_KMS_HELPER + select DRM_PANEL help Toshiba TC358764 DSI/LVDS bridge driver. diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 56d36779d213..c8922b7cac09 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -859,7 +859,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) * simple XOR between the two handle the addition nicely. 
*/ cmdline = &connector->cmdline_mode; - if (cmdline->specified) { + if (cmdline->specified && cmdline->rotation_reflection) { unsigned int cmdline_rest, panel_rest; unsigned int cmdline_rot, panel_rot; unsigned int sum_rot, sum_rest; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 0b72468e8131..57564318ceea 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -835,7 +835,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) struct drm_device *dev = fb->dev; struct drm_atomic_state *state; struct drm_plane *plane; - struct drm_connector *conn; + struct drm_connector *conn __maybe_unused; struct drm_connector_state *conn_state; int i, ret; unsigned plane_mask; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 246743d18130..226a1d0720cf 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1686,7 +1686,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len, * * Additionals options can be provided following the mode, using a comma to * separate each option. Valid options can be found in - * Documentation/fb/modedb.txt. + * Documentation/fb/modedb.rst. * * The intermediate drm_cmdline_mode structure is required to store additional * options from the command line modline like the force-enable/disable flag. diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 60ce4a8ad9e1..6f7d3b3b3628 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -2,6 +2,7 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) + depends on MMU select DRM_KMS_HELPER select VIDEOMODE_HELPERS select SND_SOC_HDMI_CODEC if SND_SOC diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index a594ab7be2c0..164d914cbe9a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc; module_param_named(fimc_devs, fimc_mask, uint, 0644); MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); -#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) +#define get_fimc_context(dev) dev_get_drvdata(dev) enum { FIMC_CLK_LCLK, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 50904eee96f7..2a3382d43bc9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d) static int g2d_init_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; - struct g2d_cmdlist_node *node = g2d->cmdlist_node; + struct g2d_cmdlist_node *node; int nr; int ret; struct g2d_buf_info *buf_info; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 1e4b21c49a06..1c524db9570f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -58,7 +58,7 @@ #define GSC_COEF_DEPTH 3 #define GSC_AUTOSUSPEND_DELAY 2000 -#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) +#define get_gsc_context(dev) dev_get_drvdata(dev) #define gsc_read(offset) readl(ctx->regs + (offset)) #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) diff --git 
a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 9af096479e1c..b24ba948b725 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler) scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); do { cpu_relax(); - } while (retry > 1 && + } while (--retry > 1 && scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); do { cpu_relax(); scaler_write(1, SCALER_INT_EN); - } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); + } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1); return retry ? 0 : -EIO; } diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 8d922bb4d953..87a38c6aaa41 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -7,6 +7,7 @@ config DRM_I915_WERROR # We use the dependency on !COMPILE_TEST to not be enabled in # allmodconfig or allyesconfig configurations depends on !COMPILE_TEST + select HEADER_TEST default n help Add -Werror to the build flags for (and only for) i915.ko. @@ -94,6 +95,20 @@ config DRM_I915_TRACE_GEM If in doubt, say "N". +config DRM_I915_TRACE_GTT + bool "Insert extra ftrace output from the GTT internals" + depends on DRM_I915_DEBUG_GEM + select TRACING + default n + help + Enable additional and verbose debugging output that will spam + ordinary tests, but may be vital for post-mortem debugging when + used with /proc/sys/kernel/ftrace_dump_on_oops + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_I915_SW_FENCE_DEBUG_OBJECTS bool "Enable additional driver debugging for fence objects" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 91355c2ea8a5..331b19cc8247 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) subdir-ccflags-y += $(call cc-disable-warning, type-limits) subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) -subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough) subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) # clang warnings subdir-ccflags-y += $(call cc-disable-warning, sign-compare) @@ -33,9 +32,9 @@ subdir-ccflags-y += \ $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) # Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h -subdir-ccflags-y += -I$(src) +subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
@@ -74,14 +73,23 @@ gt-y += \ gt/intel_context.o \ gt/intel_engine_cs.o \ gt/intel_engine_pm.o \ + gt/intel_gt.o \ gt/intel_gt_pm.o \ gt/intel_hangcheck.o \ gt/intel_lrc.o \ + gt/intel_renderstate.o \ gt/intel_reset.o \ gt/intel_ringbuffer.o \ gt/intel_mocs.o \ gt/intel_sseu.o \ + gt/intel_timeline.o \ gt/intel_workarounds.o +# autogenerated null render state +gt-y += \ + gt/gen6_renderstate.o \ + gt/gen7_renderstate.o \ + gt/gen8_renderstate.o \ + gt/gen9_renderstate.o gt-$(CONFIG_DRM_I915_SELFTEST) += \ gt/mock_engine.o i915-y += $(gt-y) @@ -121,33 +129,26 @@ i915-y += \ i915_gem_fence_reg.o \ i915_gem_gtt.o \ i915_gem.o \ - i915_gem_render_state.o \ i915_globals.o \ i915_query.o \ i915_request.o \ i915_scheduler.o \ - i915_timeline.o \ i915_trace_points.o \ i915_vma.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support -i915-y += intel_uc.o \ - intel_uc_fw.o \ - intel_guc.o \ - intel_guc_ads.o \ - intel_guc_ct.o \ - intel_guc_fw.o \ - intel_guc_log.o \ - intel_guc_submission.o \ - intel_huc.o \ - intel_huc_fw.o - -# autogenerated null render state -i915-y += intel_renderstate_gen6.o \ - intel_renderstate_gen7.o \ - intel_renderstate_gen8.o \ - intel_renderstate_gen9.o +obj-y += gt/uc/ +i915-y += gt/uc/intel_uc.o \ + gt/uc/intel_uc_fw.o \ + gt/uc/intel_guc.o \ + gt/uc/intel_guc_ads.o \ + gt/uc/intel_guc_ct.o \ + gt/uc/intel_guc_fw.o \ + gt/uc/intel_guc_log.o \ + gt/uc/intel_guc_submission.o \ + gt/uc/intel_huc.o \ + gt/uc/intel_huc_fw.o # modesetting core code obj-y += display/ @@ -174,7 +175,8 @@ i915-y += \ display/intel_overlay.o \ display/intel_psr.o \ display/intel_quirks.o \ - display/intel_sprite.o + display/intel_sprite.o \ + display/intel_tc.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o @@ -211,6 +213,25 @@ i915-y += \ display/vlv_dsi.o \ display/vlv_dsi_pll.o +# perf code +obj-y += oa/ +i915-y += \ + oa/i915_oa_hsw.o \ + oa/i915_oa_bdw.o \ + oa/i915_oa_chv.o \ + oa/i915_oa_sklgt2.o \ + oa/i915_oa_sklgt3.o \ + oa/i915_oa_sklgt4.o \ + oa/i915_oa_bxt.o \ + oa/i915_oa_kblgt2.o \ + oa/i915_oa_kblgt3.o \ + oa/i915_oa_glk.o \ + oa/i915_oa_cflgt2.o \ + oa/i915_oa_cflgt3.o \ + oa/i915_oa_cnl.o \ + oa/i915_oa_icl.o +i915-y += i915_perf.o + # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ @@ -225,23 +246,6 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ # virtual gpu code i915-y += i915_vgpu.o -# perf code -i915-y += i915_perf.o \ - i915_oa_hsw.o \ - i915_oa_bdw.o \ - i915_oa_chv.o \ - i915_oa_sklgt2.o \ - i915_oa_sklgt3.o \ - i915_oa_sklgt4.o \ - i915_oa_bxt.o \ - i915_oa_kblgt2.o \ - i915_oa_kblgt3.o \ - i915_oa_glk.o \ - i915_oa_cflgt2.o \ - i915_oa_cflgt3.o \ - i915_oa_cnl.o \ - i915_oa_icl.o - ifeq ($(CONFIG_DRM_I915_GVT),y) i915-y += intel_gvt.o include $(src)/gvt/Makefile diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test deleted file mode 100644 index 7cde0ec34615..000000000000 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header-test-$(CONFIG_DRM_I915_WERROR) := \ - i915_active_types.h \ - i915_debugfs.h \ - i915_drv.h \ - i915_irq.h \ - i915_params.h \ - i915_priolist_types.h \ - i915_reg.h \ - i915_scheduler_types.h \ - i915_timeline_types.h \ - i915_utils.h \ - intel_csr.h \ - intel_drv.h \ - intel_pm.h \ - intel_runtime_pm.h \ - intel_sideband.h 
\ - intel_uncore.h \ - intel_wakeref.h diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile index 1c75b5c9790c..173c305d7866 100644 --- a/drivers/gpu/drm/i915/display/Makefile +++ b/drivers/gpu/drm/i915/display/Makefile @@ -1,2 +1,6 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. + # Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h +header-test- := intel_vbt_defs.h diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test deleted file mode 100644 index fc7d4e5bd2c6..000000000000 --- a/drivers/gpu/drm/i915/display/Makefile.header-test +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h))) - -quiet_cmd_header_test = HDRTEST $@ - cmd_header_test = echo "\#include \"$(<F)\"" > $@ - -header_test_%.c: %.h - $(call cmd,header_test) - -extra-$(CONFIG_DRM_I915_WERROR) += \ - $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) - -clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 74448e6bf749..a42348be0438 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -202,63 +202,62 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - enum port port; + enum phy phy; u32 tmp; int lane; - for_each_dsi_port(port, intel_dsi->ports) { - + for_each_dsi_phy(phy, intel_dsi->phys) { /* * Program voltage swing and pre-emphasis level values as per * table in BSPEC under DDI buffer programing */ - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); tmp |= SCALING_MODE_SEL(0x2); tmp |= TAP2_DISABLE | TAP3_DISABLE; tmp |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); tmp |= SCALING_MODE_SEL(0x2); tmp |= TAP2_DISABLE | TAP3_DISABLE; tmp |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); tmp |= SWING_SEL_UPPER(0x2); tmp |= SWING_SEL_LOWER(0x2); tmp |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy)); tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); tmp |= SWING_SEL_UPPER(0x2); tmp |= SWING_SEL_LOWER(0x2); tmp |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= 
POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp); for (lane = 0; lane <= 3; lane++) { /* Bspec: must not use GRP register for write */ - tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp); } } } @@ -364,10 +363,10 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - enum port port; + enum phy phy; - for_each_dsi_port(port, intel_dsi->ports) - intel_combo_phy_power_up_lanes(dev_priv, port, true, + for_each_dsi_phy(phy, intel_dsi->phys) + intel_combo_phy_power_up_lanes(dev_priv, phy, true, intel_dsi->lane_count, false); } @@ -375,34 +374,47 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - enum port port; + enum phy phy; u32 tmp; int lane; /* Step 4b(i) set loadgen select for transmit and aux lanes */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy)); tmp &= ~LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp); for (lane = 0; lane <= 3; lane++) { - tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy)); tmp &= ~LOADGEN_SELECT; if (lane != 2) tmp |= LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp); } } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp); + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp); + + /* For EHL set latency optimization for PCS_DW1 lanes */ + if (IS_ELKHARTLAKE(dev_priv)) { + tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy)); + tmp &= ~LATENCY_OPTIM_MASK; + tmp |= LATENCY_OPTIM_VAL(0); + I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp); + + tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); + tmp &= ~LATENCY_OPTIM_MASK; + tmp |= LATENCY_OPTIM_VAL(0x1); + I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp); + } } } @@ -412,16 +424,16 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; - enum port port; + enum phy phy; /* clear common keeper enable bit */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + 
tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); tmp &= ~COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp); - tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port)); + I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp); + tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy)); tmp &= ~COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp); + I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp); } /* @@ -429,33 +441,33 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * Note: loadgen select program is done * as part of lane phy sequence configuration */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_CL_DW5(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_CL_DW5(phy)); tmp |= SUS_CLOCK_CONFIG; - I915_WRITE(ICL_PORT_CL_DW5(port), tmp); + I915_WRITE(ICL_PORT_CL_DW5(phy), tmp); } /* Clear training enable to change swing values */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); tmp &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); tmp &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); } /* Program swing and de-emphasis */ dsi_program_swing_and_deemphasis(encoder); /* Set training enable to trigger update */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); tmp |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); tmp |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); } } @@ -484,6 +496,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; enum port port; + enum phy phy; /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) { @@ -531,6 +544,14 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp); } } + + if (IS_ELKHARTLAKE(dev_priv)) { + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_DPHY_CHKN(phy)); + tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; + I915_WRITE(ICL_DPHY_CHKN(phy), tmp); + } + } } static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) @@ -538,15 +559,14 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; - enum port port; + enum phy phy; mutex_lock(&dev_priv->dpll_lock); - tmp = I915_READ(DPCLKA_CFGCR0_ICL); - for_each_dsi_port(port, intel_dsi->ports) { - tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port); - } + tmp = I915_READ(ICL_DPCLKA_CFGCR0); + for_each_dsi_phy(phy, intel_dsi->phys) + tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->dpll_lock); } @@ -555,15 +575,14 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi 
*intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; - enum port port; + enum phy phy; mutex_lock(&dev_priv->dpll_lock); - tmp = I915_READ(DPCLKA_CFGCR0_ICL); - for_each_dsi_port(port, intel_dsi->ports) { - tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); - } + tmp = I915_READ(ICL_DPCLKA_CFGCR0); + for_each_dsi_phy(phy, intel_dsi->phys) + tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->dpll_lock); } @@ -573,24 +592,24 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum port port; + enum phy phy; u32 val; mutex_lock(&dev_priv->dpll_lock); - val = I915_READ(DPCLKA_CFGCR0_ICL); - for_each_dsi_port(port, intel_dsi->ports) { - val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); + val = I915_READ(ICL_DPCLKA_CFGCR0); + for_each_dsi_phy(phy, intel_dsi->phys) { + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); - for_each_dsi_port(port, intel_dsi->ports) { - val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + for_each_dsi_phy(phy, intel_dsi->phys) { + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); - POSTING_READ(DPCLKA_CFGCR0_ICL); + POSTING_READ(ICL_DPCLKA_CFGCR0); mutex_unlock(&dev_priv->dpll_lock); } @@ -744,7 +763,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, enum transcoder dsi_trans; /* horizontal timings */ u16 htotal, hactive, hsync_start, hsync_end, hsync_size; - u16 hfront_porch, hback_porch; + u16 hback_porch; /* vertical timings */ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; @@ -753,8 +772,6 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, hsync_start = adjusted_mode->crtc_hsync_start; hsync_end = adjusted_mode->crtc_hsync_end; hsync_size = hsync_end - hsync_start; - hfront_porch = (adjusted_mode->crtc_hsync_start - - adjusted_mode->crtc_hdisplay); hback_porch = (adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end); vactive = adjusted_mode->crtc_vdisplay; @@ -1487,6 +1504,26 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) intel_dsi_log_params(intel_dsi); } +static void icl_dsi_add_properties(struct intel_connector *connector) +{ + u32 allowed_scalers; + + allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | + BIT(DRM_MODE_SCALE_FULLSCREEN) | + BIT(DRM_MODE_SCALE_CENTER); + + drm_connector_attach_scaling_mode_property(&connector->base, + allowed_scalers); + + connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; + + connector->base.display_info.panel_orientation = + intel_dsi_get_panel_orientation(connector); + drm_connector_init_panel_orientation_property(&connector->base, + connector->panel.fixed_mode->hdisplay, + connector->panel.fixed_mode->vdisplay); +} + void icl_dsi_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; @@ -1580,6 +1617,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) } icl_dphy_param_init(intel_dsi); + + icl_dsi_add_properties(intel_connector); return; err: diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 30bd4e76fff9..ab411d5e093c 100644 
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -176,33 +176,49 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->data_rate[plane->id] = intel_plane_data_rate(new_crtc_state, new_plane_state); - return intel_plane_atomic_calc_changes(old_crtc_state, - &new_crtc_state->base, - old_plane_state, - &new_plane_state->base); + return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state, + old_plane_state, new_plane_state); } -static int intel_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *new_plane_state) +static struct intel_crtc * +get_crtc_from_states(const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) { - struct drm_atomic_state *state = new_plane_state->state; - const struct drm_plane_state *old_plane_state = - drm_atomic_get_old_plane_state(state, plane); - struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; - const struct drm_crtc_state *old_crtc_state; - struct drm_crtc_state *new_crtc_state; - - new_plane_state->visible = false; + if (new_plane_state->base.crtc) + return to_intel_crtc(new_plane_state->base.crtc); + + if (old_plane_state->base.crtc) + return to_intel_crtc(old_plane_state->base.crtc); + + return NULL; +} + +static int intel_plane_atomic_check(struct drm_plane *_plane, + struct drm_plane_state *_new_plane_state) +{ + struct intel_plane *plane = to_intel_plane(_plane); + struct intel_atomic_state *state = + to_intel_atomic_state(_new_plane_state->state); + struct intel_plane_state *new_plane_state = + to_intel_plane_state(_new_plane_state); + const struct intel_plane_state *old_plane_state = + intel_atomic_get_old_plane_state(state, plane); + struct intel_crtc *crtc = + get_crtc_from_states(old_plane_state, new_plane_state); + const struct intel_crtc_state *old_crtc_state; + struct intel_crtc_state *new_crtc_state; + + new_plane_state->base.visible = false; if (!crtc) return 0; - old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); - new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state), - to_intel_crtc_state(new_crtc_state), - to_intel_plane_state(old_plane_state), - to_intel_plane_state(new_plane_state)); + return intel_plane_atomic_check_with_state(old_crtc_state, + new_crtc_state, + old_plane_state, + new_plane_state); } static struct intel_plane * diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 1437a8797e10..cb7ef4f9eafd 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -8,7 +8,6 @@ #include <linux/types.h> -struct drm_crtc_state; struct drm_plane; struct drm_property; struct intel_atomic_state; @@ -43,8 +42,8 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state); int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct drm_crtc_state *crtc_state, + struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, - struct drm_plane_state *plane_state); + struct intel_plane_state *plane_state); #endif /* 
__INTEL_ATOMIC_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 840daff12246..c8fd35a7ca42 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -72,6 +72,13 @@ struct dp_aud_n_m { u16 n; }; +struct hdmi_aud_ncts { + int sample_rate; + int clock; + int n; + int cts; +}; + /* Values according to DP 1.4 Table 2-104 */ static const struct dp_aud_n_m dp_aud_n_m[] = { { 32000, LC_162M, 1024, 10125 }, @@ -148,12 +155,7 @@ static const struct { #define TMDS_594M 594000 #define TMDS_593M 593407 -static const struct { - int sample_rate; - int clock; - int n; - int cts; -} hdmi_aud_ncts[] = { +static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = { { 32000, TMDS_296M, 5824, 421875 }, { 32000, TMDS_297M, 3072, 222750 }, { 32000, TMDS_593M, 5824, 843750 }, @@ -184,6 +186,49 @@ static const struct { { 192000, TMDS_594M, 24576, 594000 }, }; +/* Appendix C - N & CTS values for deep color from HDMI 2.0 spec*/ +/* HDMI N/CTS table for 10 bit deep color(30 bpp)*/ +#define TMDS_371M 371250 +#define TMDS_370M 370878 + +static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = { + { 32000, TMDS_370M, 5824, 527344 }, + { 32000, TMDS_371M, 6144, 556875 }, + { 44100, TMDS_370M, 8918, 585938 }, + { 44100, TMDS_371M, 4704, 309375 }, + { 88200, TMDS_370M, 17836, 585938 }, + { 88200, TMDS_371M, 9408, 309375 }, + { 176400, TMDS_370M, 35672, 585938 }, + { 176400, TMDS_371M, 18816, 309375 }, + { 48000, TMDS_370M, 11648, 703125 }, + { 48000, TMDS_371M, 5120, 309375 }, + { 96000, TMDS_370M, 23296, 703125 }, + { 96000, TMDS_371M, 10240, 309375 }, + { 192000, TMDS_370M, 46592, 703125 }, + { 192000, TMDS_371M, 20480, 309375 }, +}; + +/* HDMI N/CTS table for 12 bit deep color(36 bpp)*/ +#define TMDS_445_5M 445500 +#define TMDS_445M 445054 + +static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = { + { 32000, TMDS_445M, 5824, 632813 }, + { 32000, TMDS_445_5M, 4096, 445500 }, + { 44100, TMDS_445M, 8918, 703125 }, + { 44100, TMDS_445_5M, 4704, 371250 }, + { 88200, TMDS_445M, 17836, 703125 }, + { 88200, TMDS_445_5M, 9408, 371250 }, + { 176400, TMDS_445M, 35672, 703125 }, + { 176400, TMDS_445_5M, 18816, 371250 }, + { 48000, TMDS_445M, 5824, 421875 }, + { 48000, TMDS_445_5M, 5120, 371250 }, + { 96000, TMDS_445M, 11648, 421875 }, + { 96000, TMDS_445_5M, 10240, 371250 }, + { 192000, TMDS_445M, 23296, 421875 }, + { 192000, TMDS_445_5M, 20480, 371250 }, +}; + /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state) { @@ -212,14 +257,24 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state, int rate) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; - int i; + const struct hdmi_aud_ncts *hdmi_ncts_table; + int i, size; + + if (crtc_state->pipe_bpp == 36) { + hdmi_ncts_table = hdmi_aud_ncts_36bpp; + size = ARRAY_SIZE(hdmi_aud_ncts_36bpp); + } else if (crtc_state->pipe_bpp == 30) { + hdmi_ncts_table = hdmi_aud_ncts_30bpp; + size = ARRAY_SIZE(hdmi_aud_ncts_30bpp); + } else { + hdmi_ncts_table = hdmi_aud_ncts_24bpp; + size = ARRAY_SIZE(hdmi_aud_ncts_24bpp); + } - for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) { - if (rate == hdmi_aud_ncts[i].sample_rate && - adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) { - return hdmi_aud_ncts[i].n; + for (i = 0; i < size; i++) { + if (rate == 
hdmi_ncts_table[i].sample_rate && + crtc_state->port_clock == hdmi_ncts_table[i].clock) { + return hdmi_ncts_table[i].n; } } return 0; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c4710889cb32..b416b394b641 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -28,6 +28,7 @@ #include <drm/drm_dp_helper.h> #include <drm/i915_drm.h> +#include "display/intel_display.h" #include "display/intel_gmbus.h" #include "i915_drv.h" @@ -765,7 +766,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) } if (bdb->version >= 226) { - u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; + u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; switch (wakeup_time) { @@ -1354,12 +1355,27 @@ static const u8 mcc_ddc_pin_map[] = { [MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP, }; +static const u8 tgp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT, + [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP, + [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP, + [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP, + [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP, + [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP, + [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP, +}; + static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) { const u8 *ddc_pin_map; int n_entries; - if (HAS_PCH_MCC(dev_priv)) { + if (HAS_PCH_TGP(dev_priv)) { + ddc_pin_map = tgp_ddc_pin_map; + n_entries = ARRAY_SIZE(tgp_ddc_pin_map); + } else if (HAS_PCH_MCC(dev_priv)) { ddc_pin_map = mcc_ddc_pin_map; n_entries = ARRAY_SIZE(mcc_ddc_pin_map); } else if (HAS_PCH_ICP(dev_priv)) { @@ -1668,6 +1684,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv, if (!child->device_type) continue; + DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n", + child->device_type); + /* * Copy as much as we know (sizeof) and is available * (child_dev_size) of the child device. Accessing the data must @@ -1730,12 +1749,13 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv) for (port = PORT_A; port < I915_MAX_PORTS; port++) { struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; + enum phy phy = intel_port_to_phy(dev_priv, port); /* * VBT has the TypeC mode (native,TBT/USB) and we don't want * to detect it. 
*/ - if (intel_port_is_tc(dev_priv, port)) + if (intel_phy_is_tc(dev_priv, phy)) continue; info->supports_dvi = (port != PORT_A && port != PORT_E); @@ -1888,10 +1908,10 @@ out: } /** - * intel_bios_cleanup - Free any resources allocated by intel_bios_init() + * intel_bios_driver_remove - Free any resources allocated by intel_bios_init() * @dev_priv: i915 device instance */ -void intel_bios_cleanup(struct drm_i915_private *dev_priv) +void intel_bios_driver_remove(struct drm_i915_private *dev_priv) { kfree(dev_priv->vbt.child_dev); dev_priv->vbt.child_dev = NULL; diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 4e42cfaf61a7..4969189e620f 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -42,6 +42,7 @@ enum intel_backlight_type { INTEL_BACKLIGHT_DISPLAY_DDI, INTEL_BACKLIGHT_DSI_DCS, INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE, + INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE, }; struct edp_power_seq { @@ -227,7 +228,7 @@ struct mipi_pps_data { } __packed; void intel_bios_init(struct drm_i915_private *dev_priv); -void intel_bios_cleanup(struct drm_i915_private *dev_priv); +void intel_bios_driver_remove(struct drm_i915_private *dev_priv); bool intel_bios_is_valid_vbt(const void *buf, size_t size); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 753ac3165061..ee52c5b4643b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -65,7 +65,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, struct intel_qgv_point *sp, int point) { - u32 val = 0, val2; + u32 val = 0, val2 = 0; int ret; ret = sandybridge_pcode_read(dev_priv, @@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) clpchgroup = (sa->deburst * deinterleave / num_channels) << i; bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + bi->num_qgv_points = qi.num_points; + for (j = 0; j < qi.num_points; j++) { const struct intel_qgv_point *sp = &qi.points[j]; int ct, bw; @@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) bi->deratedbw[j] = min(maxdebw, bw * 9 / 10); /* 90% */ - DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n", + DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n", i, j, bi->num_planes, bi->deratedbw[j]); } @@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, { int i; - /* Did we initialize the bw limits successfully? */ - if (dev_priv->max_bw[0].num_planes == 0) - return UINT_MAX; - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { const struct intel_bw_info *bi = &dev_priv->max_bw[i]; + /* + * Pcode will not expose all QGV points when + * SAGV is forced to off/min/med/max. 
+ */ + if (qgv_point >= bi->num_qgv_points) + return UINT_MAX; + if (num_planes >= bi->num_planes) return bi->deratedbw[qgv_point]; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 8993ab283562..93b0d190c184 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -545,10 +545,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, /* There are cases where we can end up here with power domains * off and a CDCLK frequency other than the minimum, like when * issuing a modeset without actually changing any display after - * a system suspend. So grab the PIPE-A domain, which covers + * a system suspend. So grab the display core domain, which covers * the HW blocks needed for the following programming. */ - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_CCK) | @@ -606,7 +606,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); + intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, @@ -631,10 +631,10 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, /* There are cases where we can end up here with power domains * off and a CDCLK frequency other than the minimum, like when * issuing a modeset without actually changing any display after - * a system suspend. So grab the PIPE-A domain, which covers + * a system suspend. So grab the display core domain, which covers * the HW blocks needed for the following programming. 
*/ - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); vlv_punit_get(dev_priv); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); @@ -653,7 +653,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); + intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } static int bdw_calc_cdclk(int min_cdclk) @@ -1756,9 +1756,10 @@ sanitize: static int icl_calc_cdclk(int min_cdclk, unsigned int ref) { - int ranges_24[] = { 312000, 552000, 648000 }; - int ranges_19_38[] = { 307200, 556800, 652800 }; - int *ranges; + static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 }; + static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 }; + const int *ranges; + int len, i; switch (ref) { default: @@ -1766,19 +1767,22 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref) /* fall through */ case 24000: ranges = ranges_24; + len = ARRAY_SIZE(ranges_24); break; case 19200: case 38400: ranges = ranges_19_38; + len = ARRAY_SIZE(ranges_19_38); break; } - if (min_cdclk > ranges[1]) - return ranges[2]; - else if (min_cdclk > ranges[0]) - return ranges[1]; - else - return ranges[0]; + for (i = 0; i < len; i++) { + if (min_cdclk <= ranges[i]) + return ranges[i]; + } + + WARN_ON(min_cdclk > ranges[len - 1]); + return ranges[len - 1]; } static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) @@ -1792,16 +1796,24 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) default: MISSING_CASE(cdclk); /* fall through */ + case 172800: case 307200: case 556800: case 652800: WARN_ON(dev_priv->cdclk.hw.ref != 19200 && dev_priv->cdclk.hw.ref != 38400); break; + case 180000: case 312000: case 552000: case 648000: WARN_ON(dev_priv->cdclk.hw.ref != 24000); + break; + case 192000: + WARN_ON(dev_priv->cdclk.hw.ref != 19200 && + dev_priv->cdclk.hw.ref != 38400 && + dev_priv->cdclk.hw.ref != 24000); + break; } ratio = cdclk / (dev_priv->cdclk.hw.ref / 2); @@ -1854,14 +1866,23 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; } -static u8 icl_calc_voltage_level(int cdclk) +static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) { - if (cdclk > 556800) - return 2; - else if (cdclk > 312000) - return 1; - else - return 0; + if (IS_ELKHARTLAKE(dev_priv)) { + if (cdclk > 312000) + return 2; + else if (cdclk > 180000) + return 1; + else + return 0; + } else { + if (cdclk > 556800) + return 2; + else if (cdclk > 312000) + return 1; + else + return 0; + } } static void icl_get_cdclk(struct drm_i915_private *dev_priv, @@ -1912,7 +1933,7 @@ out: * at least what the CDCLK frequency requires. 
*/ cdclk_state->voltage_level = - icl_calc_voltage_level(cdclk_state->cdclk); + icl_calc_voltage_level(dev_priv, cdclk_state->cdclk); } static void icl_init_cdclk(struct drm_i915_private *dev_priv) @@ -1947,7 +1968,8 @@ sanitize: sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv, sanitized_state.cdclk); sanitized_state.voltage_level = - icl_calc_voltage_level(sanitized_state.cdclk); + icl_calc_voltage_level(dev_priv, + sanitized_state.cdclk); icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } @@ -1958,7 +1980,8 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = cdclk_state.bypass; cdclk_state.vco = 0; - cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk); + cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv, + cdclk_state.cdclk); icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } @@ -2240,6 +2263,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) min_cdclk = max(2 * 96000, min_cdclk); /* + * "For DP audio configuration, cdclk frequency shall be set to + * meet the following requirements: + * DP Link Frequency(MHz) | Cdclk frequency(MHz) + * 270 | 320 or higher + * 162 | 200 or higher" + */ + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) + min_cdclk = max(crtc_state->port_clock, min_cdclk); + + /* * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000KHz. */ @@ -2549,7 +2583,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; state->cdclk.logical.voltage_level = - max(icl_calc_voltage_level(cdclk), + max(icl_calc_voltage_level(dev_priv, cdclk), cnl_compute_min_voltage_level(state)); if (!state->active_crtcs) { @@ -2559,7 +2593,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; state->cdclk.actual.voltage_level = - icl_calc_voltage_level(cdclk); + icl_calc_voltage_level(dev_priv, cdclk); } else { state->cdclk.actual = state->cdclk.logical; } @@ -2594,7 +2628,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) */ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 11) { + if (IS_ELKHARTLAKE(dev_priv)) { + if (dev_priv->cdclk.hw.ref == 24000) + dev_priv->max_cdclk_freq = 552000; + else + dev_priv->max_cdclk_freq = 556800; + } else if (INTEL_GEN(dev_priv) >= 11) { if (dev_priv->cdclk.hw.ref == 24000) dev_priv->max_cdclk_freq = 648000; else diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 841708da5a56..ac8218a040ab 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -6,13 +6,13 @@ #include "intel_combo_phy.h" #include "intel_drv.h" -#define for_each_combo_port(__dev_priv, __port) \ - for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ - for_each_if(intel_port_is_combophy(__dev_priv, __port)) +#define for_each_combo_phy(__dev_priv, __phy) \ + for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \ + for_each_if(intel_phy_is_combo(__dev_priv, __phy)) -#define for_each_combo_port_reverse(__dev_priv, __port) \ - for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \ - for_each_if(intel_port_is_combophy(__dev_priv, __port)) +#define for_each_combo_phy_reverse(__dev_priv, __phy) \ + for ((__phy) = 
I915_MAX_PHYS; (__phy)-- > PHY_A;) \ + for_each_if(intel_phy_is_combo(__dev_priv, __phy)) enum { PROCMON_0_85V_DOT_0, @@ -38,18 +38,17 @@ static const struct cnl_procmon { }; /* - * CNL has just one set of registers, while ICL has two sets: one for port A and - * the other for port B. The CNL registers are equivalent to the ICL port A - * registers, that's why we call the ICL macros even though the function has CNL - * on its name. + * CNL has just one set of registers, while gen11 has a set for each combo PHY. + * The CNL registers are equivalent to the gen11 PHY A registers, that's why we + * call the ICL macros even though the function has CNL on its name. */ static const struct cnl_procmon * -cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port) +cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) { const struct cnl_procmon *procmon; u32 val; - val = I915_READ(ICL_PORT_COMP_DW3(port)); + val = I915_READ(ICL_PORT_COMP_DW3(phy)); switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); @@ -75,32 +74,32 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port) } static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { const struct cnl_procmon *procmon; u32 val; - procmon = cnl_get_procmon_ref_values(dev_priv, port); + procmon = cnl_get_procmon_ref_values(dev_priv, phy); - val = I915_READ(ICL_PORT_COMP_DW1(port)); + val = I915_READ(ICL_PORT_COMP_DW1(phy)); val &= ~((0xff << 16) | 0xff); val |= procmon->dw1; - I915_WRITE(ICL_PORT_COMP_DW1(port), val); + I915_WRITE(ICL_PORT_COMP_DW1(phy), val); - I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9); - I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10); + I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9); + I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10); } static bool check_phy_reg(struct drm_i915_private *dev_priv, - enum port port, i915_reg_t reg, u32 mask, + enum phy phy, i915_reg_t reg, u32 mask, u32 expected_val) { u32 val = I915_READ(reg); if ((val & mask) != expected_val) { - DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: " + DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: " "current %08x mask %08x expected %08x\n", - port_name(port), + phy_name(phy), reg.reg, val, mask, expected_val); return false; } @@ -109,18 +108,18 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv, } static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { const struct cnl_procmon *procmon; bool ret; - procmon = cnl_get_procmon_ref_values(dev_priv, port); + procmon = cnl_get_procmon_ref_values(dev_priv, phy); - ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port), + ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy), (0xff << 16) | 0xff, procmon->dw1); - ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy), -1U, procmon->dw9); - ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy), -1U, procmon->dw10); return ret; @@ -134,15 +133,15 @@ static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv) static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv) { - enum port port = PORT_A; + enum phy phy = PHY_A; bool ret; if (!cnl_combo_phy_enabled(dev_priv)) return false; - ret = cnl_verify_procmon_ref_values(dev_priv, port); + ret = 
cnl_verify_procmon_ref_values(dev_priv, phy); - ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5, + ret &= check_phy_reg(dev_priv, phy, CNL_PORT_CL1CM_DW5, CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); return ret; @@ -157,7 +156,7 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv) I915_WRITE(CHICKEN_MISC_2, val); /* Dummy PORT_A to get the correct CNL register from the ICL macro */ - cnl_set_procmon_ref_values(dev_priv, PORT_A); + cnl_set_procmon_ref_values(dev_priv, PHY_A); val = I915_READ(CNL_PORT_COMP_DW0); val |= COMP_INIT; @@ -181,35 +180,39 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) } static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { - return !(I915_READ(ICL_PHY_MISC(port)) & - ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && - (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT); + /* The PHY C added by EHL has no PHY_MISC register */ + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) + return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT; + else + return !(I915_READ(ICL_PHY_MISC(phy)) & + ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && + (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT); } static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { bool ret; - if (!icl_combo_phy_enabled(dev_priv, port)) + if (!icl_combo_phy_enabled(dev_priv, phy)) return false; - ret = cnl_verify_procmon_ref_values(dev_priv, port); + ret = cnl_verify_procmon_ref_values(dev_priv, phy); - if (port == PORT_A) - ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port), + if (phy == PHY_A) + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy), IREFGEN, IREFGEN); - ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy), CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); return ret; } void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, - enum port port, bool is_dsi, + enum phy phy, bool is_dsi, int lane_count, bool lane_reversal) { u8 lane_mask; @@ -254,66 +257,120 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, } } - val = I915_READ(ICL_PORT_CL_DW10(port)); + val = I915_READ(ICL_PORT_CL_DW10(phy)); val &= ~PWR_DOWN_LN_MASK; val |= lane_mask << PWR_DOWN_LN_SHIFT; - I915_WRITE(ICL_PORT_CL_DW10(port), val); + I915_WRITE(ICL_PORT_CL_DW10(phy), val); +} + +static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val) +{ + bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL; + bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL; + bool dsi_present = intel_bios_is_dsi_present(i915, NULL); + + /* + * VBT's 'dvo port' field for child devices references the DDI, not + * the PHY. So if combo PHY A is wired up to drive an external + * display, we should see a child device present on PORT_D and + * nothing on PORT_A and no DSI. + */ + if (ddi_d_present && !ddi_a_present && !dsi_present) + return val | ICL_PHY_MISC_MUX_DDID; + + /* + * If we encounter a VBT that claims to have an external display on + * DDI-D _and_ an internal display on DDI-A/DSI leave an error message + * in the log and let the internal display win. + */ + if (ddi_d_present) + DRM_ERROR("VBT claims to have both internal and external displays on PHY A. 
Configuring for internal.\n"); + + return val & ~ICL_PHY_MISC_MUX_DDID; } static void icl_combo_phys_init(struct drm_i915_private *dev_priv) { - enum port port; + enum phy phy; - for_each_combo_port(dev_priv, port) { + for_each_combo_phy(dev_priv, phy) { u32 val; - if (icl_combo_phy_verify_state(dev_priv, port)) { - DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n", - port_name(port)); + if (icl_combo_phy_verify_state(dev_priv, phy)) { + DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n", + phy_name(phy)); continue; } - val = I915_READ(ICL_PHY_MISC(port)); + /* + * Although EHL adds a combo PHY C, there's no PHY_MISC + * register for it and no need to program the + * DE_IO_COMP_PWR_DOWN setting on PHY C. + */ + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) + goto skip_phy_misc; + + /* + * EHL's combo PHY A can be hooked up to either an external + * display (via DDI-D) or an internal display (via DDI-A or + * the DSI DPHY). This is a motherboard design decision that + * can't be changed on the fly, so initialize the PHY's mux + * based on whether our VBT indicates the presence of any + * "internal" child devices. + */ + val = I915_READ(ICL_PHY_MISC(phy)); + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) + val = ehl_combo_phy_a_mux(dev_priv, val); val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(port), val); + I915_WRITE(ICL_PHY_MISC(phy), val); - cnl_set_procmon_ref_values(dev_priv, port); +skip_phy_misc: + cnl_set_procmon_ref_values(dev_priv, phy); - if (port == PORT_A) { - val = I915_READ(ICL_PORT_COMP_DW8(port)); + if (phy == PHY_A) { + val = I915_READ(ICL_PORT_COMP_DW8(phy)); val |= IREFGEN; - I915_WRITE(ICL_PORT_COMP_DW8(port), val); + I915_WRITE(ICL_PORT_COMP_DW8(phy), val); } - val = I915_READ(ICL_PORT_COMP_DW0(port)); + val = I915_READ(ICL_PORT_COMP_DW0(phy)); val |= COMP_INIT; - I915_WRITE(ICL_PORT_COMP_DW0(port), val); + I915_WRITE(ICL_PORT_COMP_DW0(phy), val); - val = I915_READ(ICL_PORT_CL_DW5(port)); + val = I915_READ(ICL_PORT_CL_DW5(phy)); val |= CL_POWER_DOWN_ENABLE; - I915_WRITE(ICL_PORT_CL_DW5(port), val); + I915_WRITE(ICL_PORT_CL_DW5(phy), val); } } static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) { - enum port port; + enum phy phy; - for_each_combo_port_reverse(dev_priv, port) { + for_each_combo_phy_reverse(dev_priv, phy) { u32 val; - if (port == PORT_A && - !icl_combo_phy_verify_state(dev_priv, port)) - DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n", - port_name(port)); + if (phy == PHY_A && + !icl_combo_phy_verify_state(dev_priv, phy)) + DRM_WARN("Combo PHY %c HW state changed unexpectedly\n", + phy_name(phy)); + + /* + * Although EHL adds a combo PHY C, there's no PHY_MISC + * register for it and no need to program the + * DE_IO_COMP_PWR_DOWN setting on PHY C. 
+ */ + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) + goto skip_phy_misc; - val = I915_READ(ICL_PHY_MISC(port)); + val = I915_READ(ICL_PHY_MISC(phy)); val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(port), val); + I915_WRITE(ICL_PHY_MISC(phy), val); - val = I915_READ(ICL_PORT_COMP_DW0(port)); +skip_phy_misc: + val = I915_READ(ICL_PORT_COMP_DW0(phy)); val &= ~COMP_INIT; - I915_WRITE(ICL_PORT_COMP_DW0(port), val); + I915_WRITE(ICL_PORT_COMP_DW0(phy), val); } } diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h index e6e195a83b19..660886f86c59 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.h +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h @@ -7,14 +7,14 @@ #define __INTEL_COMBO_PHY_H__ #include <linux/types.h> -#include <drm/i915_drm.h> struct drm_i915_private; +enum phy; void intel_combo_phy_init(struct drm_i915_private *dev_priv); void intel_combo_phy_uninit(struct drm_i915_private *dev_priv); void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, - enum port port, bool is_dsi, + enum phy phy, bool is_dsi, int lane_count, bool lane_reversal); #endif /* __INTEL_COMBO_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 41310f8e5a2a..d0163d86c42a 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector) if (ret) goto err; - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { ret = -EFAULT; goto err_backlight; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index dcb8b6dff91d..cf3c3fd7089f 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -45,6 +45,7 @@ #include "intel_lspcon.h" #include "intel_panel.h" #include "intel_psr.h" +#include "intel_tc.h" #include "intel_vdsc.h" struct ddi_buf_trans { @@ -846,8 +847,8 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, - int type, int rate, int *n_entries) +icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, + int *n_entries) { if (type == INTEL_OUTPUT_HDMI) { *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); @@ -867,12 +868,13 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) { int n_entries, level, default_entry; + enum phy phy = intel_port_to_phy(dev_priv, port); level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; if (INTEL_GEN(dev_priv) >= 11) { - if (intel_port_is_combophy(dev_priv, port)) - icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, + if (intel_phy_is_combo(dev_priv, phy)) + icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); @@ -1486,9 +1488,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); int link_clock; - if (intel_port_is_combophy(dev_priv, port)) { + if (intel_phy_is_combo(dev_priv, phy)) { 
link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); } else { enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv, @@ -1770,7 +1773,10 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ temp = TRANS_DDI_FUNC_ENABLE; - temp |= TRANS_DDI_SELECT_PORT(port); + if (INTEL_GEN(dev_priv) >= 12) + temp |= TGL_TRANS_DDI_SELECT_PORT(port); + else + temp |= TRANS_DDI_SELECT_PORT(port); switch (crtc_state->pipe_bpp) { case 18: @@ -1850,8 +1856,13 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); u32 val = I915_READ(reg); - val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); - val |= TRANS_DDI_PORT_NONE; + if (INTEL_GEN(dev_priv) >= 12) { + val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK | + TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + } else { + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | + TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + } I915_WRITE(reg, val); if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && @@ -2003,10 +2014,19 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, mst_pipe_mask = 0; for_each_pipe(dev_priv, p) { enum transcoder cpu_transcoder = (enum transcoder)p; + unsigned int port_mask, ddi_select; + + if (INTEL_GEN(dev_priv) >= 12) { + port_mask = TGL_TRANS_DDI_PORT_MASK; + ddi_select = TGL_TRANS_DDI_SELECT_PORT(port); + } else { + port_mask = TRANS_DDI_PORT_MASK; + ddi_select = TRANS_DDI_SELECT_PORT(port); + } tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port)) + if ((tmp & port_mask) != ddi_select) continue; if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == @@ -2085,6 +2105,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); /* * TODO: Add support for MST encoders. Atm, the following should never @@ -2102,7 +2123,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, * ports. 
*/ if (intel_crtc_has_dp_encoder(crtc_state) || - intel_port_is_tc(dev_priv, encoder->port)) + intel_phy_is_tc(dev_priv, phy)) intel_display_power_get(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); @@ -2122,9 +2143,14 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) enum port port = encoder->port; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_PORT(port)); + if (cpu_transcoder != TRANSCODER_EDP) { + if (INTEL_GEN(dev_priv) >= 12) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TGL_TRANS_CLK_SEL_PORT(port)); + else + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_PORT(port)); + } } void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) @@ -2132,9 +2158,14 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_DISABLED); + if (cpu_transcoder != TRANSCODER_EDP) { + if (INTEL_GEN(dev_priv) >= 12) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TGL_TRANS_CLK_SEL_DISABLED); + else + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_DISABLED); + } } static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, @@ -2227,11 +2258,12 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); int n_entries; if (INTEL_GEN(dev_priv) >= 11) { - if (intel_port_is_combophy(dev_priv, port)) - icl_get_combo_buf_trans(dev_priv, port, encoder->type, + if (intel_phy_is_combo(dev_priv, phy)) + icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); @@ -2413,15 +2445,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type, + u32 level, enum phy phy, int type, int rate) { const struct cnl_ddi_buf_trans *ddi_translations = NULL; u32 n_entries, val; int ln; - ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, - rate, &n_entries); + ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate, + &n_entries); if (!ddi_translations) return; @@ -2431,41 +2463,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, } /* Set PORT_TX_DW5 */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | TAP2_DISABLE | TAP3_DISABLE); val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); val |= TAP3_DISABLE; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); /* Program PORT_TX_DW2 */ - val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Program Rcomp scalar for every table entry */ val |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); + 
I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val); /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); - I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val); } /* Program PORT_TX_DW7 */ - val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW7_LN0(phy)); val &= ~N_SCALAR_MASK; val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); - I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val); } static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2473,7 +2505,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, enum intel_output_type type) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int width = 0; int rate = 0; u32 val; @@ -2494,12 +2526,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * set PORT_PCS_DW1 cmnkeeper_enable to 1b, * else clear to 0b. */ - val = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); if (type == INTEL_OUTPUT_HDMI) val &= ~COMMON_KEEPER_EN; else val |= COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val); + I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val); /* 2. Program loadgen select */ /* @@ -2509,33 +2541,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - val = I915_READ(ICL_PORT_CL_DW5(port)); + val = I915_READ(ICL_PORT_CL_DW5(phy)); val |= SUS_CLOCK_CONFIG; - I915_WRITE(ICL_PORT_CL_DW5(port), val); + I915_WRITE(ICL_PORT_CL_DW5(phy), val); /* 4. Clear training enable to change swing values */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); val &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); + icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate); /* 6. 
Set training enable to trigger update */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); val |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); } static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2663,9 +2695,9 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, enum intel_output_type type) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (intel_port_is_combophy(dev_priv, port)) + if (intel_phy_is_combo(dev_priv, phy)) icl_combo_phy_ddi_vswing_sequence(encoder, level, type); else icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); @@ -2728,12 +2760,13 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp) static inline u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { - if (intel_port_is_combophy(dev_priv, port)) { - return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port); - } else if (intel_port_is_tc(dev_priv, port)) { - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + if (intel_phy_is_combo(dev_priv, phy)) { + return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + } else if (intel_phy_is_tc(dev_priv, phy)) { + enum tc_port tc_port = intel_port_to_tc(dev_priv, + (enum port)phy); return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port); } @@ -2746,23 +2779,33 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; mutex_lock(&dev_priv->dpll_lock); - val = I915_READ(DPCLKA_CFGCR0_ICL); - WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0); + val = I915_READ(ICL_DPCLKA_CFGCR0); + WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0); - if (intel_port_is_combophy(dev_priv, port)) { - val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); - POSTING_READ(DPCLKA_CFGCR0_ICL); + if (intel_phy_is_combo(dev_priv, phy)) { + /* + * Even though this register references DDIs, note that we + * want to pass the PHY rather than the port (DDI). For + * ICL, port=phy in all cases so it doesn't matter, but for + * EHL the bspec notes the following: + * + * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA + * Clock Select chooses the PLL for both DDIA and DDID and + * drives port A in all cases." 
+ */ + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); + POSTING_READ(ICL_DPCLKA_CFGCR0); } - val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); mutex_unlock(&dev_priv->dpll_lock); } @@ -2770,14 +2813,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; mutex_lock(&dev_priv->dpll_lock); - val = I915_READ(DPCLKA_CFGCR0_ICL); - val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + val = I915_READ(ICL_DPCLKA_CFGCR0); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); mutex_unlock(&dev_priv->dpll_lock); } @@ -2835,11 +2878,13 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) ddi_clk_needed = false; } - val = I915_READ(DPCLKA_CFGCR0_ICL); + val = I915_READ(ICL_DPCLKA_CFGCR0); for_each_port_masked(port, port_mask) { + enum phy phy = intel_port_to_phy(dev_priv, port); + bool ddi_clk_ungated = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, - port)); + phy)); if (ddi_clk_needed == ddi_clk_ungated) continue; @@ -2851,10 +2896,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) if (WARN_ON(ddi_clk_needed)) continue; - DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", - port_name(port)); - val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", + phy_name(port)); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); } } @@ -2863,6 +2908,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); u32 val; const struct intel_shared_dpll *pll = crtc_state->shared_dpll; @@ -2872,7 +2918,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, mutex_lock(&dev_priv->dpll_lock); if (INTEL_GEN(dev_priv) >= 11) { - if (!intel_port_is_combophy(dev_priv, port)) + if (!intel_phy_is_combo(dev_priv, phy)) I915_WRITE(DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); } else if (IS_CANNONLAKE(dev_priv)) { @@ -2912,9 +2958,10 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); if (INTEL_GEN(dev_priv) >= 11) { - if (!intel_port_is_combophy(dev_priv, port)) + if (!intel_phy_is_combo(dev_priv, phy)) I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } else if (IS_CANNONLAKE(dev_priv)) { I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) | @@ -2995,25 +3042,22 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) { struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 ln0, ln1, lane_info; + u32 ln0, ln1, lane_mask; - if (tc_port == PORT_TC_NONE || 
intel_dig_port->tc_type == TC_PORT_TBT) + if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) return; ln0 = I915_READ(MG_DP_MODE(0, port)); ln1 = I915_READ(MG_DP_MODE(1, port)); - switch (intel_dig_port->tc_type) { - case TC_PORT_TYPEC: + switch (intel_dig_port->tc_mode) { + case TC_PORT_DP_ALT: ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & - DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); + lane_mask = intel_tc_port_get_lane_mask(intel_dig_port); - switch (lane_info) { + switch (lane_mask) { case 0x1: case 0x4: break; @@ -3038,7 +3082,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) MG_DP_MODE_CFG_DP_X2_MODE; break; default: - MISSING_CASE(lane_info); + MISSING_CASE(lane_mask); } break; @@ -3048,7 +3092,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) break; default: - MISSING_CASE(intel_dig_port->tc_type); + MISSING_CASE(intel_dig_port->tc_mode); return; } @@ -3110,6 +3154,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); @@ -3123,7 +3168,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_clk_select(encoder, crtc_state); - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port); icl_disable_phy_clock_gating(dig_port); @@ -3138,11 +3186,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, else intel_prepare_dp_ddi_buffers(encoder, crtc_state); - if (intel_port_is_combophy(dev_priv, port)) { + if (intel_phy_is_combo(dev_priv, phy)) { bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - intel_combo_phy_power_up_lanes(dev_priv, port, false, + intel_combo_phy_power_up_lanes(dev_priv, phy, false, crtc_state->lane_count, lane_reversal); } @@ -3290,6 +3338,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &dig_port->dp; bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (!is_mst) { intel_ddi_disable_pipe_clock(old_crtc_state); @@ -3305,8 +3354,10 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); - intel_display_power_put_unchecked(dev_priv, - dig_port->ddi_io_power_domain); + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_put_unchecked(dev_priv, + dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); } @@ -3618,33 +3669,28 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder, intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type); } -static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - enum port port) +static void 
+intel_ddi_update_prepare(struct intel_atomic_state *state, + struct intel_encoder *encoder, + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 val = I915_READ(PORT_TX_DFLEXDPMLE1); - bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - - val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); - switch (pipe_config->lane_count) { - case 1: - val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : - DFLEXDPMLE1_DPMLETC_ML0(tc_port); - break; - case 2: - val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : - DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); - break; - case 4: - val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); - break; - default: - MISSING_CASE(pipe_config->lane_count); - } - I915_WRITE(PORT_TX_DFLEXDPMLE1, val); + struct intel_crtc_state *crtc_state = + crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL; + int required_lanes = crtc_state ? crtc_state->lane_count : 1; + + WARN_ON(crtc && crtc->active); + + intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes); + if (crtc_state && crtc_state->base.active) + intel_update_active_dpll(state, crtc, encoder); +} + +static void +intel_ddi_update_complete(struct intel_atomic_state *state, + struct intel_encoder *encoder, + struct intel_crtc *crtc) +{ + intel_tc_port_put_link(enc_to_dig_port(&encoder->base)); } static void @@ -3654,26 +3700,25 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + bool is_tc_port = intel_phy_is_tc(dev_priv, phy); - if (intel_crtc_has_dp_encoder(crtc_state) || - intel_port_is_tc(dev_priv, encoder->port)) + if (is_tc_port) + intel_tc_port_get_link(dig_port, crtc_state->lane_count); + + if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) intel_display_power_get(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); - if (IS_GEN9_LP(dev_priv)) + if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT) + /* + * Program the lane count for static/dynamic connections on + * Type-C ports. Skip this step for TBT. + */ + intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count); + else if (IS_GEN9_LP(dev_priv)) bxt_ddi_phy_set_lane_optim_mask(encoder, crtc_state->lane_lat_optim_mask); - - /* - * Program the lane count for static/dynamic connections on Type-C ports. - * Skip this step for TBT. 
- */ - if (dig_port->tc_type == TC_PORT_UNKNOWN || - dig_port->tc_type == TC_PORT_TBT) - return; - - intel_ddi_set_fia_lane_count(encoder, crtc_state, port); } static void @@ -3683,11 +3728,15 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + bool is_tc_port = intel_phy_is_tc(dev_priv, phy); - if (intel_crtc_has_dp_encoder(crtc_state) || - intel_port_is_tc(dev_priv, encoder->port)) + if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) intel_display_power_put_unchecked(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); + + if (is_tc_port) + intel_tc_port_put_link(dig_port); } static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) @@ -3764,7 +3813,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - struct intel_digital_port *intel_dig_port; u32 temp, flags = 0; /* XXX: DSI transcoder paranoia */ @@ -3803,7 +3851,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, switch (temp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: pipe_config->has_hdmi_sink = true; - intel_dig_port = enc_to_dig_port(&encoder->base); pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); @@ -3941,49 +3988,18 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, return 0; } -static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) -{ - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - - intel_dp_encoder_suspend(encoder); - - /* - * TODO: disconnect also from USB DP alternate mode once we have a - * way to handle the modeset restore in that mode during resume - * even if the sink has disappeared while being suspended. 
- */ - if (dig_port->tc_legacy_port) - icl_tc_phy_disconnect(i915, dig_port); -} - -static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder) -{ - struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder); - struct drm_i915_private *i915 = to_i915(drm_encoder->dev); - - if (intel_port_is_tc(i915, dig_port->base.port)) - intel_digital_port_connected(&dig_port->base); - - intel_dp_encoder_reset(drm_encoder); -} - static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_i915_private *i915 = to_i915(encoder->dev); intel_dp_encoder_flush_work(encoder); - if (intel_port_is_tc(i915, dig_port->base.port)) - icl_tc_phy_disconnect(i915, dig_port); - drm_encoder_cleanup(encoder); kfree(dig_port); } static const struct drm_encoder_funcs intel_ddi_funcs = { - .reset = intel_ddi_encoder_reset, + .reset = intel_dp_encoder_reset, .destroy = intel_ddi_encoder_destroy, }; @@ -4108,14 +4124,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, return modeset_pipe(&crtc->base, ctx); } -static bool intel_ddi_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +static enum intel_hotplug_state +intel_ddi_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); struct drm_modeset_acquire_ctx ctx; - bool changed; + enum intel_hotplug_state state; int ret; - changed = intel_encoder_hotplug(encoder, connector); + state = intel_encoder_hotplug(encoder, connector, irq_received); drm_modeset_acquire_init(&ctx, 0); @@ -4137,7 +4156,27 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder, drm_modeset_acquire_fini(&ctx); WARN(ret, "Acquiring modeset locks failed with %i\n", ret); - return changed; + /* + * Unpowered type-c dongles can take some time to boot and be + * responsive, so give those dongles some time to power up + * and then retry the probe. + * + * On many platforms the HDMI live state signal is known to be + * unreliable, so we can't use it to detect if a sink is connected or + * not. Instead we detect if it's connected based on whether we can + * read the EDID or not. That in turn has a problem during disconnect, + * since the HPD interrupt may be raised before the DDC lines get + * disconnected (due to how the required length of DDC vs. HPD + * connector pins are specified) and so we'll still be able to get a + * valid EDID. To solve this, schedule another detection cycle if this + * time around we didn't detect any change in the sink's connection * status. 
+ */ + if (state == INTEL_HOTPLUG_UNCHANGED && irq_received && + !dig_port->dp.is_mst) + state = INTEL_HOTPLUG_RETRY; + + return state; } static struct intel_connector * @@ -4225,6 +4264,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) struct drm_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; enum pipe pipe; + enum phy phy = intel_port_to_phy(dev_priv, port); init_hdmi = port_info->supports_dvi || port_info->supports_hdmi; init_dp = port_info->supports_dp; @@ -4269,7 +4309,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->update_pipe = intel_ddi_update_pipe; intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_encoder->get_config = intel_ddi_get_config; - intel_encoder->suspend = intel_ddi_encoder_suspend; + intel_encoder->suspend = intel_dp_encoder_suspend; intel_encoder->get_power_domains = intel_ddi_get_power_domains; intel_encoder->type = INTEL_OUTPUT_DDI; intel_encoder->power_domain = intel_port_to_power_domain(port); @@ -4288,9 +4328,15 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); - intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) && - !port_info->supports_typec_usb && - !port_info->supports_tbt; + if (intel_phy_is_tc(dev_priv, phy)) { + bool is_legacy = !port_info->supports_typec_usb && + !port_info->supports_tbt; + + intel_tc_port_init(intel_dig_port, is_legacy); + + intel_encoder->update_prepare = intel_ddi_update_prepare; + intel_encoder->update_complete = intel_ddi_update_complete; + } switch (port) { case PORT_A: @@ -4317,6 +4363,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_F_IO; break; + case PORT_G: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_G_IO; + break; + case PORT_H: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_H_IO; + break; + case PORT_I: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_I_IO; + break; default: MISSING_CASE(port); } @@ -4351,9 +4409,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_infoframe_init(intel_dig_port); - if (intel_port_is_tc(dev_priv, port)) - intel_digital_port_connected(intel_encoder); - return; err: diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8592a7d422de..9e4ee29fd0fc 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -78,6 +78,7 @@ #include "intel_quirks.h" #include "intel_sideband.h" #include "intel_sprite.h" +#include "intel_tc.h" /* Primary plane formats for gen <= 3 */ static const u32 i8xx_primary_formats[] = { @@ -515,9 +516,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, } static bool -needs_modeset(const struct drm_crtc_state *state) +needs_modeset(const struct intel_crtc_state *state) { - return drm_atomic_crtc_needs_modeset(state); + return drm_atomic_crtc_needs_modeset(&state->base); } /* @@ -1839,7 +1840,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) /* FIXME: assert CPU port conditions for SNB+ */ } - trace_intel_pipe_enable(dev_priv, pipe); + trace_intel_pipe_enable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -1880,7 +1881,7 @@ static void intel_disable_pipe(const struct 
intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - trace_intel_pipe_disable(dev_priv, pipe); + trace_intel_pipe_disable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -3715,10 +3716,27 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) return 0; } +static bool i9xx_plane_has_windowing(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + + if (IS_CHERRYVIEW(dev_priv)) + return i9xx_plane == PLANE_B; + else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + return false; + else if (IS_GEN(dev_priv, 4)) + return i9xx_plane == PLANE_C; + else + return i9xx_plane == PLANE_B || + i9xx_plane == PLANE_C; +} + static int i9xx_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); int ret; ret = chv_plane_check_rotation(plane_state); @@ -3729,7 +3747,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state, &crtc_state->base, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, - false, true); + i9xx_plane_has_windowing(plane), + true); if (ret) return ret; @@ -3758,6 +3777,10 @@ static void i9xx_update_plane(struct intel_plane *plane, u32 linear_offset; int x = plane_state->color_plane[0].x; int y = plane_state->color_plane[0].y; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + int crtc_w = drm_rect_width(&plane_state->base.dst); + int crtc_h = drm_rect_height(&plane_state->base.dst); unsigned long irqflags; u32 dspaddr_offset; u32 dspcntr; @@ -3776,18 +3799,18 @@ static void i9xx_update_plane(struct intel_plane *plane, I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); if (INTEL_GEN(dev_priv) < 4) { - /* pipesrc and dspsize control the size that is scaled from, - * which should always be the user's requested size. + /* + * PLANE_A doesn't actually have a full window + * generator but let's assume we still need to + * program whatever is there. */ - I915_WRITE_FW(DSPPOS(i9xx_plane), 0); + I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); I915_WRITE_FW(DSPSIZE(i9xx_plane), - ((crtc_state->pipe_src_h - 1) << 16) | - (crtc_state->pipe_src_w - 1)); + ((crtc_h - 1) << 16) | (crtc_w - 1)); } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { - I915_WRITE_FW(PRIMPOS(i9xx_plane), 0); + I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); I915_WRITE_FW(PRIMSIZE(i9xx_plane), - ((crtc_state->pipe_src_h - 1) << 16) | - (crtc_state->pipe_src_w - 1)); + ((crtc_h - 1) << 16) | (crtc_w - 1)); I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); } @@ -3950,10 +3973,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format) case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: return PLANE_CTL_FORMAT_XRGB_8888; + case DRM_FORMAT_XBGR2101010: + return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; case DRM_FORMAT_XRGB2101010: return PLANE_CTL_FORMAT_XRGB_2101010; - case DRM_FORMAT_XBGR2101010: - return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; @@ -4248,12 +4271,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) return; /* We have a modeset vs reset deadlock, defensively unbreak it. 
*/ - set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.wait_queue); + set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); + smp_mb__after_atomic(); + wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(&dev_priv->gt); } /* @@ -4299,7 +4323,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) int ret; /* reset doesn't touch the display */ - if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags)) + if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) return; state = fetch_and_zero(&dev_priv->modeset_restore_state); @@ -4339,7 +4363,7 @@ unlock: drm_modeset_acquire_fini(ctx); mutex_unlock(&dev->mode_config.mutex); - clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); + clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); } static void icl_set_pipe_chicken(struct intel_crtc *crtc) @@ -5796,7 +5820,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s if (!old_crtc_state->ips_enabled) return false; - if (needs_modeset(&new_crtc_state->base)) + if (needs_modeset(new_crtc_state)) return true; /* @@ -5823,7 +5847,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s if (!new_crtc_state->ips_enabled) return false; - if (needs_modeset(&new_crtc_state->base)) + if (needs_modeset(new_crtc_state)) return true; /* @@ -5877,13 +5901,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *old_state = old_crtc_state->base.state; + struct drm_atomic_state *state = old_crtc_state->base.state; struct intel_crtc_state *pipe_config = - intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state), + intel_atomic_get_new_crtc_state(to_intel_atomic_state(state), crtc); struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); + drm_atomic_get_old_plane_state(state, primary); intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); @@ -5895,12 +5919,12 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) if (old_primary_state) { struct drm_plane_state *new_primary_state = - drm_atomic_get_new_plane_state(old_state, primary); + drm_atomic_get_new_plane_state(state, primary); intel_fbc_post_update(crtc); if (new_primary_state->visible && - (needs_modeset(&pipe_config->base) || + (needs_modeset(pipe_config) || !old_primary_state->visible)) intel_post_enable_primary(&crtc->base, pipe_config); } @@ -5920,20 +5944,20 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *old_state = old_crtc_state->base.state; + struct drm_atomic_state *state = old_crtc_state->base.state; struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); - bool modeset = needs_modeset(&pipe_config->base); - struct intel_atomic_state *old_intel_state = - 
to_intel_atomic_state(old_state); + drm_atomic_get_old_plane_state(state, primary); + bool modeset = needs_modeset(pipe_config); + struct intel_atomic_state *intel_state = + to_intel_atomic_state(state); if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config)) hsw_disable_ips(old_crtc_state); if (old_primary_state) { struct intel_plane_state *new_primary_state = - intel_atomic_get_new_plane_state(old_intel_state, + intel_atomic_get_new_plane_state(intel_state, to_intel_plane(primary)); intel_fbc_pre_update(crtc, pipe_config, new_primary_state); @@ -5984,7 +6008,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * If we're doing a modeset, we're done. No need to do any pre-vblank * watermark programming here. */ - if (needs_modeset(&pipe_config->base)) + if (needs_modeset(pipe_config)) return; /* @@ -6002,7 +6026,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * us to. */ if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, + dev_priv->display.initial_watermarks(intel_state, pipe_config); else if (pipe_config->update_wm_pre) intel_update_watermarks(crtc); @@ -6036,19 +6060,111 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, intel_frontbuffer_flip(dev_priv, fb_bits); } -static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, +/* + * intel_connector_primary_encoder - get the primary encoder for a connector + * @connector: connector for which to return the encoder + * + * Returns the primary encoder for a connector. There is a 1:1 mapping from + * all connectors to their encoder, except for DP-MST connectors which have + * both a virtual and a primary encoder. These DP-MST primary encoders can be + * pointed to by as many DP-MST connectors as there are pipes. + */ +static struct intel_encoder * +intel_connector_primary_encoder(struct intel_connector *connector) +{ + struct intel_encoder *encoder; + + if (connector->mst_port) + return &dp_to_dig_port(connector->mst_port)->base; + + encoder = intel_attached_encoder(&connector->base); + WARN_ON(!encoder); + + return encoder; +} + +static bool +intel_connector_needs_modeset(struct intel_atomic_state *state, + const struct drm_connector_state *old_conn_state, + const struct drm_connector_state *new_conn_state) +{ + struct intel_crtc *old_crtc = old_conn_state->crtc ? + to_intel_crtc(old_conn_state->crtc) : NULL; + struct intel_crtc *new_crtc = new_conn_state->crtc ? + to_intel_crtc(new_conn_state->crtc) : NULL; + + return new_crtc != old_crtc || + (new_crtc && + needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); +} + +static void intel_encoders_update_prepare(struct intel_atomic_state *state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + int i; + + for_each_oldnew_connector_in_state(&state->base, conn, + old_conn_state, new_conn_state, i) { + struct intel_encoder *encoder; + struct intel_crtc *crtc; + + if (!intel_connector_needs_modeset(state, + old_conn_state, + new_conn_state)) + continue; + + encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + if (!encoder->update_prepare) + continue; + + crtc = new_conn_state->crtc ? 
+ to_intel_crtc(new_conn_state->crtc) : NULL; + encoder->update_prepare(state, encoder, crtc); + } +} + +static void intel_encoders_update_complete(struct intel_atomic_state *state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + int i; + + for_each_oldnew_connector_in_state(&state->base, conn, + old_conn_state, new_conn_state, i) { + struct intel_encoder *encoder; + struct intel_crtc *crtc; + + if (!intel_connector_needs_modeset(state, + old_conn_state, + new_conn_state)) + continue; + + encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + if (!encoder->update_complete) + continue; + + crtc = new_conn_state->crtc ? + to_intel_crtc(new_conn_state->crtc) : NULL; + encoder->update_complete(state, encoder, crtc); + } +} + +static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->pre_pll_enable) @@ -6056,19 +6172,19 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, } } -static void intel_encoders_pre_enable(struct drm_crtc *crtc, +static void intel_encoders_pre_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->pre_enable) @@ -6076,19 +6192,19 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc, } } -static void intel_encoders_enable(struct drm_crtc *crtc, +static void intel_encoders_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->enable) @@ -6097,19 +6213,19 @@ static void intel_encoders_enable(struct drm_crtc *crtc, } } -static void intel_encoders_disable(struct drm_crtc *crtc, +static void intel_encoders_disable(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; - for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); - if (old_conn_state->crtc != crtc) + if (old_conn_state->crtc != &crtc->base) 
continue; intel_opregion_notify_encoder(encoder, false); @@ -6118,19 +6234,19 @@ static void intel_encoders_disable(struct drm_crtc *crtc, } } -static void intel_encoders_post_disable(struct drm_crtc *crtc, +static void intel_encoders_post_disable(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; - for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); - if (old_conn_state->crtc != crtc) + if (old_conn_state->crtc != &crtc->base) continue; if (encoder->post_disable) @@ -6138,19 +6254,19 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc, } } -static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, +static void intel_encoders_post_pll_disable(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; - for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); - if (old_conn_state->crtc != crtc) + if (old_conn_state->crtc != &crtc->base) continue; if (encoder->post_pll_disable) @@ -6158,19 +6274,19 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, } } -static void intel_encoders_update_pipe(struct drm_crtc *crtc, +static void intel_encoders_update_pipe(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->update_pipe) @@ -6187,15 +6303,13 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat } static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); if (WARN_ON(intel_crtc->active)) return; @@ -6231,7 +6345,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_crtc->active = true; - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); if (pipe_config->has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the @@ -6255,16 +6369,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, pipe_config); + dev_priv->display.initial_watermarks(state, 
pipe_config); intel_enable_pipe(pipe_config); if (pipe_config->has_pch_encoder) - ironlake_pch_enable(old_intel_state, pipe_config); + ironlake_pch_enable(state, pipe_config); assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); if (HAS_PCH_CPT(dev_priv)) cpt_verify_modeset(dev, intel_crtc->pipe); @@ -6310,33 +6424,37 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) u32 val; val = MBUS_DBOX_A_CREDIT(2); - val |= MBUS_DBOX_BW_CREDIT(1); - val |= MBUS_DBOX_B_CREDIT(8); + + if (INTEL_GEN(dev_priv) >= 12) { + val |= MBUS_DBOX_BW_CREDIT(2); + val |= MBUS_DBOX_B_CREDIT(12); + } else { + val |= MBUS_DBOX_BW_CREDIT(1); + val |= MBUS_DBOX_B_CREDIT(8); + } I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); } static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); bool psl_clkgate_wa; if (WARN_ON(intel_crtc->active)) return; - intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); + intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); if (pipe_config->shared_dpll) intel_enable_shared_dpll(pipe_config); - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); if (intel_crtc_has_dp_encoder(pipe_config)) intel_dp_set_m_n(pipe_config, M1_N1); @@ -6394,7 +6512,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_ddi_enable_transcoder_func(pipe_config); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, pipe_config); + dev_priv->display.initial_watermarks(state, pipe_config); if (INTEL_GEN(dev_priv) >= 11) icl_pipe_mbus_enable(intel_crtc); @@ -6404,7 +6522,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_enable_pipe(pipe_config); if (pipe_config->has_pch_encoder) - lpt_pch_enable(old_intel_state, pipe_config); + lpt_pch_enable(state, pipe_config); if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(pipe_config, true); @@ -6412,7 +6530,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); if (psl_clkgate_wa) { intel_wait_for_vblank(dev_priv, pipe); @@ -6444,7 +6562,7 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) } static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_device *dev = crtc->dev; @@ -6460,7 +6578,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - intel_encoders_disable(crtc, old_crtc_state, old_state); + intel_encoders_disable(intel_crtc, old_crtc_state, state); 
drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -6472,7 +6590,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, if (old_crtc_state->has_pch_encoder) ironlake_fdi_disable(crtc); - intel_encoders_post_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_disable(intel_crtc, old_crtc_state, state); if (old_crtc_state->has_pch_encoder) { ironlake_disable_pch_transcoder(dev_priv, pipe); @@ -6503,14 +6621,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, } static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - intel_encoders_disable(crtc, old_crtc_state, old_state); + intel_encoders_disable(intel_crtc, old_crtc_state, state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -6532,9 +6650,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, else ironlake_pfit_disable(old_crtc_state); - intel_encoders_post_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_disable(intel_crtc, old_crtc_state, state); - intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -6560,33 +6678,47 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) I915_WRITE(BCLRPAT(crtc->pipe), 0); } -bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port) +bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { - if (port == PORT_NONE) + if (phy == PHY_NONE) return false; - if (IS_ELKHARTLAKE(dev_priv)) - return port <= PORT_C; + if (IS_ELKHARTLAKE(dev_priv) || INTEL_GEN(dev_priv) >= 12) + return phy <= PHY_C; if (INTEL_GEN(dev_priv) >= 11) - return port <= PORT_B; + return phy <= PHY_B; return false; } -bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port) +bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { + if (INTEL_GEN(dev_priv) >= 12) + return phy >= PHY_D && phy <= PHY_I; + if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) - return port >= PORT_C && port <= PORT_F; + return phy >= PHY_C && phy <= PHY_F; return false; } +enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) +{ + if (IS_ELKHARTLAKE(i915) && port == PORT_D) + return PHY_A; + + return (enum phy)port; +} + enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) { - if (!intel_port_is_tc(dev_priv, port)) + if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) return PORT_TC_NONE; + if (INTEL_GEN(dev_priv) >= 12) + return port - PORT_D; + return port - PORT_C; } @@ -6614,6 +6746,26 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); + + if (intel_phy_is_tc(dev_priv, phy) && + dig_port->tc_mode == TC_PORT_TBT_ALT) { + switch (dig_port->aux_ch) { + case AUX_CH_C: + return POWER_DOMAIN_AUX_TBT1; + case AUX_CH_D: + return POWER_DOMAIN_AUX_TBT2; + 
case AUX_CH_E: + return POWER_DOMAIN_AUX_TBT3; + case AUX_CH_F: + return POWER_DOMAIN_AUX_TBT4; + default: + MISSING_CASE(dig_port->aux_ch); + return POWER_DOMAIN_AUX_TBT1; + } + } + switch (dig_port->aux_ch) { case AUX_CH_A: return POWER_DOMAIN_AUX_A; @@ -6633,14 +6785,12 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) } } -static u64 get_crtc_power_domains(struct drm_crtc *crtc, - struct intel_crtc_state *crtc_state) +static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_encoder *encoder; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + enum pipe pipe = crtc->pipe; u64 mask; enum transcoder transcoder = crtc_state->cpu_transcoder; @@ -6653,7 +6803,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc, crtc_state->pch_pfit.force_thru) mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); - drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) { + drm_for_each_encoder_mask(encoder, &dev_priv->drm, + crtc_state->base.encoder_mask) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); mask |= BIT_ULL(intel_encoder->power_domain); @@ -6669,17 +6820,16 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc, } static u64 -modeset_get_crtc_power_domains(struct drm_crtc *crtc, - struct intel_crtc_state *crtc_state) +modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain domain; u64 domains, new_domains, old_domains; - old_domains = intel_crtc->enabled_power_domains; - intel_crtc->enabled_power_domains = new_domains = - get_crtc_power_domains(crtc, crtc_state); + old_domains = crtc->enabled_power_domains; + crtc->enabled_power_domains = new_domains = + get_crtc_power_domains(crtc_state); domains = new_domains & ~old_domains; @@ -6699,10 +6849,8 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, } static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -6729,7 +6877,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); + intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); if (IS_CHERRYVIEW(dev_priv)) { chv_prepare_pll(intel_crtc, pipe_config); @@ -6739,7 +6887,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, vlv_enable_pll(intel_crtc, pipe_config); } - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); i9xx_pfit_enable(pipe_config); @@ -6748,14 +6896,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, /* update DSPCNTR to configure gamma for pipe bottom color */ 
intel_disable_primary_plane(pipe_config); - dev_priv->display.initial_watermarks(old_intel_state, - pipe_config); + dev_priv->display.initial_watermarks(state, pipe_config); intel_enable_pipe(pipe_config); assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); } static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) @@ -6768,10 +6915,8 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) } static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -6796,7 +6941,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); i9xx_enable_pll(intel_crtc, pipe_config); @@ -6808,7 +6953,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, + dev_priv->display.initial_watermarks(state, pipe_config); else intel_update_watermarks(intel_crtc); @@ -6817,7 +6962,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) @@ -6836,7 +6981,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) } static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_device *dev = crtc->dev; @@ -6851,7 +6996,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, if (IS_GEN(dev_priv, 2)) intel_wait_for_vblank(dev_priv, pipe); - intel_encoders_disable(crtc, old_crtc_state, old_state); + intel_encoders_disable(intel_crtc, old_crtc_state, state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -6860,7 +7005,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, i9xx_pfit_disable(old_crtc_state); - intel_encoders_post_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_disable(intel_crtc, old_crtc_state, state); if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev_priv)) @@ -6871,7 +7016,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, i9xx_disable_pll(old_crtc_state); } - intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); @@ -6925,7 +7070,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, WARN_ON(IS_ERR(crtc_state) || ret); - dev_priv->display.crtc_disable(crtc_state, state); + dev_priv->display.crtc_disable(crtc_state, 
to_intel_atomic_state(state)); drm_atomic_state_put(state); @@ -6988,7 +7133,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder) /* Cross check the actual hw state with our own modeset state tracking (and it's * internal consistency). */ -static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, +static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -7006,7 +7151,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, if (!crtc_state) return; - I915_STATE_WARN(!crtc_state->active, + I915_STATE_WARN(!crtc_state->base.active, "connector is active, but attached crtc isn't\n"); if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) @@ -7018,7 +7163,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, "attached encoder crtc differs from connector crtc\n"); } else { - I915_STATE_WARN(crtc_state && crtc_state->active, + I915_STATE_WARN(crtc_state && crtc_state->base.active, "attached crtc is active, but connector isn't\n"); I915_STATE_WARN(!crtc_state && conn_state->best_encoder, "best encoder set without crtc!\n"); @@ -9484,6 +9629,8 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_atomic_state *state = + to_intel_atomic_state(crtc_state->base.state); const struct intel_limit *limit; int refclk = 120000; @@ -9525,7 +9672,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, ironlake_compute_dpll(crtc, crtc_state, NULL); - if (!intel_get_shared_dpll(crtc_state, NULL)) { + if (!intel_reserve_shared_dplls(state, crtc, NULL)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -9906,7 +10053,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - if (!intel_get_shared_dpll(crtc_state, encoder)) { + if (!intel_reserve_shared_dplls(state, crtc, encoder)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -9936,22 +10083,37 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + enum phy phy = intel_port_to_phy(dev_priv, port); + enum icl_port_dpll_id port_dpll_id; enum intel_dpll_id id; u32 temp; - /* TODO: TBT pll not implemented. 
*/ - if (intel_port_is_combophy(dev_priv, port)) { - temp = I915_READ(DPCLKA_CFGCR0_ICL) & - DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); - } else if (intel_port_is_tc(dev_priv, port)) { - id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port)); + if (intel_phy_is_combo(dev_priv, phy)) { + temp = I915_READ(ICL_DPCLKA_CFGCR0) & + ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); + port_dpll_id = ICL_PORT_DPLL_DEFAULT; + } else if (intel_phy_is_tc(dev_priv, phy)) { + u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; + + if (clk_sel == DDI_CLK_SEL_MG) { + id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, + port)); + port_dpll_id = ICL_PORT_DPLL_MG_PHY; + } else { + WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); + id = DPLL_ID_ICL_TBTPLL; + port_dpll_id = ICL_PORT_DPLL_DEFAULT; + } } else { WARN(1, "Invalid port %x\n", port); return; } - pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); + pipe_config->icl_port_dplls[port_dpll_id].pll = + intel_get_shared_dpll_by_id(dev_priv, id); + + icl_set_active_port_dpll(pipe_config, port_dpll_id); } static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, @@ -10191,7 +10353,11 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); - port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; + if (INTEL_GEN(dev_priv) >= 12) + port = (tmp & TGL_TRANS_DDI_PORT_MASK) >> + TGL_TRANS_DDI_PORT_SHIFT; + else + port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; if (INTEL_GEN(dev_priv) >= 11) icelake_get_ddi_pll(dev_priv, port, pipe_config); @@ -11297,7 +11463,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) * * Returns true or false. */ -static bool intel_wm_need_update(struct intel_plane_state *cur, +static bool intel_wm_need_update(const struct intel_plane_state *cur, struct intel_plane_state *new) { /* Update watermarks on tiling or size changes. 
*/ @@ -11329,33 +11495,28 @@ static bool needs_scaling(const struct intel_plane_state *state) } int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct drm_crtc_state *crtc_state, + struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, - struct drm_plane_state *plane_state) + struct intel_plane_state *plane_state) { - struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); - struct drm_crtc *crtc = crtc_state->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_plane *plane = to_intel_plane(plane_state->plane); - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = old_crtc_state->base.active; - bool is_crtc_enabled = crtc_state->active; + bool is_crtc_enabled = crtc_state->base.active; bool turn_off, turn_on, visible, was_visible; - struct drm_framebuffer *fb = plane_state->fb; + struct drm_framebuffer *fb = plane_state->base.fb; int ret; if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { - ret = skl_update_scaler_plane( - to_intel_crtc_state(crtc_state), - to_intel_plane_state(plane_state)); + ret = skl_update_scaler_plane(crtc_state, plane_state); if (ret) return ret; } was_visible = old_plane_state->base.visible; - visible = plane_state->visible; + visible = plane_state->base.visible; if (!was_crtc_enabled && WARN_ON(was_visible)) was_visible = false; @@ -11371,22 +11532,22 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat * only combine the results from all planes in the current place? */ if (!is_crtc_enabled) { - plane_state->visible = visible = false; - to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id); - to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0; + plane_state->base.visible = visible = false; + crtc_state->active_planes &= ~BIT(plane->id); + crtc_state->data_rate[plane->id] = 0; } if (!was_visible && !visible) return 0; if (fb != old_plane_state->base.fb) - pipe_config->fb_changed = true; + crtc_state->fb_changed = true; turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", - intel_crtc->base.base.id, intel_crtc->base.name, + crtc->base.base.id, crtc->base.name, plane->base.base.id, plane->base.name, fb ? 
fb->base.id : -1); @@ -11397,29 +11558,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat if (turn_on) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) - pipe_config->update_wm_pre = true; + crtc_state->update_wm_pre = true; /* must disable cxsr around plane enable/disable */ if (plane->id != PLANE_CURSOR) - pipe_config->disable_cxsr = true; + crtc_state->disable_cxsr = true; } else if (turn_off) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) - pipe_config->update_wm_post = true; + crtc_state->update_wm_post = true; /* must disable cxsr around plane enable/disable */ if (plane->id != PLANE_CURSOR) - pipe_config->disable_cxsr = true; - } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state), - to_intel_plane_state(plane_state))) { + crtc_state->disable_cxsr = true; + } else if (intel_wm_need_update(old_plane_state, plane_state)) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { /* FIXME bollocks */ - pipe_config->update_wm_pre = true; - pipe_config->update_wm_post = true; + crtc_state->update_wm_pre = true; + crtc_state->update_wm_post = true; } } if (visible || was_visible) - pipe_config->fb_bits |= plane->frontbuffer_bit; + crtc_state->fb_bits |= plane->frontbuffer_bit; /* * ILK/SNB DVSACNTR/Sprite Enable @@ -11458,8 +11618,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat (IS_GEN_RANGE(dev_priv, 5, 6) || IS_IVYBRIDGE(dev_priv)) && (turn_on || (!needs_scaling(old_plane_state) && - needs_scaling(to_intel_plane_state(plane_state))))) - pipe_config->disable_lp_wm = true; + needs_scaling(plane_state)))) + crtc_state->disable_lp_wm = true; return 0; } @@ -11608,7 +11768,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); int ret; - bool mode_changed = needs_modeset(crtc_state); + bool mode_changed = needs_modeset(pipe_config); if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && mode_changed && !crtc_state->active) @@ -12042,7 +12202,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) case INTEL_OUTPUT_DDI: if (WARN_ON(!HAS_DDI(to_i915(dev)))) break; - /* else: fall through */ + /* else, fall through */ case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: @@ -12090,6 +12250,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) saved_state->scaler_state = crtc_state->scaler_state; saved_state->shared_dpll = crtc_state->shared_dpll; saved_state->dpll_hw_state = crtc_state->dpll_hw_state; + memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, + sizeof(saved_state->icl_port_dplls)); saved_state->crc_enabled = crtc_state->crc_enabled; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) @@ -12706,10 +12868,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, } } -static void verify_wm_state(struct drm_crtc *crtc, - struct drm_crtc_state *new_state) +static void verify_wm_state(struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct skl_hw_state { struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; @@ -12719,21 +12881,20 @@ static void verify_wm_state(struct drm_crtc *crtc, struct skl_ddb_allocation *sw_ddb; struct skl_pipe_wm *sw_wm; struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; - struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); - const enum pipe pipe = intel_crtc->pipe; + const enum pipe pipe = crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || !new_state->active) + if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active) return; hw = kzalloc(sizeof(*hw), GFP_KERNEL); if (!hw) return; - skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm); - sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; + skl_pipe_wm_get_hw_state(crtc, &hw->wm); + sw_wm = &new_crtc_state->wm.skl.optimal; - skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv); + skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); skl_ddb_get_hw_state(dev_priv, &hw->ddb); sw_ddb = &dev_priv->wm.skl_hw.ddb; @@ -12781,7 +12942,7 @@ static void verify_wm_state(struct drm_crtc *crtc, /* DDB */ hw_ddb_entry = &hw->ddb_y[plane]; - sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane]; + sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", @@ -12833,7 +12994,7 @@ static void verify_wm_state(struct drm_crtc *crtc, /* DDB */ hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; - sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR]; + sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", @@ -12847,23 +13008,22 @@ static void verify_wm_state(struct drm_crtc *crtc, } static void -verify_connector_state(struct drm_device *dev, - struct drm_atomic_state *state, - struct drm_crtc *crtc) +verify_connector_state(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_connector *connector; struct drm_connector_state *new_conn_state; int i; - for_each_new_connector_in_state(state, connector, new_conn_state, i) { + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct drm_encoder *encoder = connector->encoder; - struct drm_crtc_state *crtc_state = NULL; + struct intel_crtc_state *crtc_state = NULL; - if (new_conn_state->crtc != crtc) + if (new_conn_state->crtc != &crtc->base) continue; if (crtc) - crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); intel_connector_verify_state(crtc_state, new_conn_state); @@ -12873,14 +13033,14 @@ verify_connector_state(struct drm_device *dev, } static void -verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) +verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) { struct intel_encoder *encoder; struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; int i; - for_each_intel_encoder(dev, encoder) { + for_each_intel_encoder(&dev_priv->drm, encoder) { bool enabled = false, found = false; enum pipe pipe; @@ -12888,7 +13048,7 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) encoder->base.base.id, encoder->base.name); - for_each_oldnew_connector_in_state(state, connector, old_conn_state, + for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, new_conn_state, i) { if (old_conn_state->best_encoder == &encoder->base) found = true; @@ -12922,50 +13082,49 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) } static void 
-verify_crtc_state(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state) +verify_crtc_state(struct intel_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *pipe_config, *sw_config; - struct drm_atomic_state *old_state; + struct intel_crtc_state *pipe_config; + struct drm_atomic_state *state; bool active; - old_state = old_crtc_state->state; - __drm_atomic_helper_crtc_destroy_state(old_crtc_state); - pipe_config = to_intel_crtc_state(old_crtc_state); + state = old_crtc_state->base.state; + __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base); + pipe_config = old_crtc_state; memset(pipe_config, 0, sizeof(*pipe_config)); - pipe_config->base.crtc = crtc; - pipe_config->base.state = old_state; + pipe_config->base.crtc = &crtc->base; + pipe_config->base.state = state; - DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); - active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); + active = dev_priv->display.get_pipe_config(crtc, pipe_config); /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) - active = new_crtc_state->active; + active = new_crtc_state->base.active; - I915_STATE_WARN(new_crtc_state->active != active, + I915_STATE_WARN(new_crtc_state->base.active != active, "crtc active state doesn't match with hw state " - "(expected %i, found %i)\n", new_crtc_state->active, active); + "(expected %i, found %i)\n", new_crtc_state->base.active, active); - I915_STATE_WARN(intel_crtc->active != new_crtc_state->active, + I915_STATE_WARN(crtc->active != new_crtc_state->base.active, "transitional active state does not match atomic hw state " - "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active); + "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active); - for_each_encoder_on_crtc(dev, crtc, encoder) { + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { enum pipe pipe; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(active != new_crtc_state->active, + I915_STATE_WARN(active != new_crtc_state->base.active, "[ENCODER:%i] active %i with crtc active %i\n", - encoder->base.base.id, active, new_crtc_state->active); + encoder->base.base.id, active, new_crtc_state->base.active); - I915_STATE_WARN(active && intel_crtc->pipe != pipe, + I915_STATE_WARN(active && crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); @@ -12975,16 +13134,16 @@ verify_crtc_state(struct drm_crtc *crtc, intel_crtc_compute_pixel_rate(pipe_config); - if (!new_crtc_state->active) + if (!new_crtc_state->base.active) return; intel_pipe_config_sanity_check(dev_priv, pipe_config); - sw_config = to_intel_crtc_state(new_crtc_state); - if (!intel_pipe_config_compare(sw_config, pipe_config, false)) { + if (!intel_pipe_config_compare(new_crtc_state, + pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); - intel_dump_pipe_config(sw_config, NULL, "[sw state]"); + intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); } } @@ -13004,8 +13163,8 @@ intel_verify_planes(struct intel_atomic_state *state) static void 
verify_single_dpll_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, - struct drm_crtc *crtc, - struct drm_crtc_state *new_state) + struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state) { struct intel_dpll_hw_state dpll_hw_state; unsigned int crtc_mask; @@ -13035,16 +13194,16 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, return; } - crtc_mask = drm_crtc_mask(crtc); + crtc_mask = drm_crtc_mask(&crtc->base); - if (new_state->active) + if (new_crtc_state->base.active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); else I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", @@ -13057,51 +13216,47 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, } static void -verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state) +verify_shared_dpll_state(struct intel_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); - struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (new_state->shared_dpll) - verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state); + if (new_crtc_state->shared_dpll) + verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); - if (old_state->shared_dpll && - old_state->shared_dpll != new_state->shared_dpll) { - unsigned int crtc_mask = drm_crtc_mask(crtc); - struct intel_shared_dpll *pll = old_state->shared_dpll; + if (old_crtc_state->shared_dpll && + old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { + unsigned int crtc_mask = drm_crtc_mask(&crtc->base); + struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(drm_crtc_index(&crtc->base))); I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(drm_crtc_index(&crtc->base))); } } static void -intel_modeset_verify_crtc(struct drm_crtc *crtc, - struct drm_atomic_state *state, - struct drm_crtc_state *old_state, - struct drm_crtc_state *new_state) +intel_modeset_verify_crtc(struct intel_crtc *crtc, + struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - if (!needs_modeset(new_state) && - !to_intel_crtc_state(new_state)->update_pipe) + if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) return; - verify_wm_state(crtc, new_state); - verify_connector_state(crtc->dev, state, crtc); - verify_crtc_state(crtc, old_state, new_state); - verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); + verify_wm_state(crtc, new_crtc_state); + 
verify_connector_state(state, crtc); + verify_crtc_state(crtc, old_crtc_state, new_crtc_state); + verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); } static void -verify_disabled_dpll_state(struct drm_device *dev) +verify_disabled_dpll_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < dev_priv->num_shared_dpll; i++) @@ -13109,12 +13264,12 @@ verify_disabled_dpll_state(struct drm_device *dev) } static void -intel_modeset_verify_disabled(struct drm_device *dev, - struct drm_atomic_state *state) +intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, + struct intel_atomic_state *state) { - verify_encoder_state(dev, state); - verify_connector_state(dev, state, NULL); - verify_disabled_dpll_state(dev); + verify_encoder_state(dev_priv, state); + verify_connector_state(state, NULL); + verify_disabled_dpll_state(dev_priv); } static void update_scanline_offset(const struct intel_crtc_state *crtc_state) @@ -13168,27 +13323,18 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state) static void intel_modeset_clear_plls(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; if (!dev_priv->display.crtc_compute_clock) return; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - struct intel_shared_dpll *old_dpll = - old_crtc_state->shared_dpll; - - if (!needs_modeset(&new_crtc_state->base)) - continue; - - new_crtc_state->shared_dpll = NULL; - - if (!old_dpll) + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (!needs_modeset(new_crtc_state)) continue; - intel_release_shared_dpll(old_dpll, crtc, &state->base); + intel_release_shared_dplls(state, crtc); } } @@ -13210,7 +13356,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) /* look at all crtc's that are going to be enabled in during modeset */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->base.active || - !needs_modeset(&crtc_state->base)) + !needs_modeset(crtc_state)) continue; if (first_crtc_state) { @@ -13235,7 +13381,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) crtc_state->hsw_workaround_pipe = INVALID_PIPE; if (!crtc_state->base.active || - needs_modeset(&crtc_state->base)) + needs_modeset(crtc_state)) continue; /* 2 or more enabled crtcs means no need for w/a */ @@ -13253,15 +13399,16 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) return 0; } -static int intel_lock_all_pipes(struct drm_atomic_state *state) +static int intel_lock_all_pipes(struct intel_atomic_state *state) { - struct drm_crtc *crtc; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; /* Add all pipes to the state */ - for_each_crtc(state->dev, crtc) { - struct drm_crtc_state *crtc_state; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; - crtc_state = drm_atomic_get_crtc_state(state, crtc); + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); } @@ -13269,32 +13416,35 @@ static int intel_lock_all_pipes(struct drm_atomic_state *state) return 0; } -static int intel_modeset_all_pipes(struct drm_atomic_state *state) +static int intel_modeset_all_pipes(struct 
intel_atomic_state *state) { - struct drm_crtc *crtc; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; /* * Add all pipes to the state, and force * a modeset on all the active ones. */ - for_each_crtc(state->dev, crtc) { - struct drm_crtc_state *crtc_state; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; int ret; - crtc_state = drm_atomic_get_crtc_state(state, crtc); + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - if (!crtc_state->active || needs_modeset(crtc_state)) + if (!crtc_state->base.active || needs_modeset(crtc_state)) continue; - crtc_state->mode_changed = true; + crtc_state->base.mode_changed = true; - ret = drm_atomic_add_affected_connectors(state, crtc); + ret = drm_atomic_add_affected_connectors(&state->base, + &crtc->base); if (ret) return ret; - ret = drm_atomic_add_affected_planes(state, crtc); + ret = drm_atomic_add_affected_planes(&state->base, + &crtc->base); if (ret) return ret; } @@ -13356,18 +13506,18 @@ static int intel_modeset_checks(struct intel_atomic_state *state) */ if (intel_cdclk_changed(&dev_priv->cdclk.logical, &state->cdclk.logical)) { - ret = intel_lock_all_pipes(&state->base); + ret = intel_lock_all_pipes(state); if (ret < 0) return ret; } if (is_power_of_2(state->active_crtcs)) { - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; pipe = ilog2(state->active_crtcs); - crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base; - crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (crtc_state && needs_modeset(crtc_state)) pipe = INVALID_PIPE; } else { @@ -13379,14 +13529,14 @@ static int intel_modeset_checks(struct intel_atomic_state *state) intel_cdclk_needs_cd2x_update(dev_priv, &dev_priv->cdclk.actual, &state->cdclk.actual)) { - ret = intel_lock_all_pipes(&state->base); + ret = intel_lock_all_pipes(state); if (ret < 0) return ret; state->cdclk.pipe = pipe; } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, &state->cdclk.actual)) { - ret = intel_modeset_all_pipes(&state->base); + ret = intel_modeset_all_pipes(state); if (ret < 0) return ret; @@ -13478,7 +13628,7 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(&new_crtc_state->base)) + if (!needs_modeset(new_crtc_state)) continue; if (!new_crtc_state->base.enable) { @@ -13492,7 +13642,7 @@ static int intel_atomic_check(struct drm_device *dev, intel_crtc_check_fastset(old_crtc_state, new_crtc_state); - if (needs_modeset(&new_crtc_state->base)) + if (needs_modeset(new_crtc_state)) any_ms = true; } @@ -13527,12 +13677,12 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(&new_crtc_state->base) && + if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) continue; intel_dump_pipe_config(new_crtc_state, state, - needs_modeset(&new_crtc_state->base) ? + needs_modeset(new_crtc_state) ? 
"[modeset]" : "[fastset]"); } @@ -13553,10 +13703,10 @@ static int intel_atomic_check(struct drm_device *dev, return ret; } -static int intel_atomic_prepare_commit(struct drm_device *dev, - struct drm_atomic_state *state) +static int intel_atomic_prepare_commit(struct intel_atomic_state *state) { - return drm_atomic_helper_prepare_planes(dev, state); + return drm_atomic_helper_prepare_planes(state->base.dev, + &state->base); } u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) @@ -13567,60 +13717,57 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) if (!vblank->max_vblank_count) return (u32)drm_crtc_accurate_vblank_count(&crtc->base); - return dev->driver->get_vblank_counter(dev, crtc->pipe); + return crtc->base.funcs->get_vblank_counter(&crtc->base); } -static void intel_update_crtc(struct drm_crtc *crtc, - struct drm_atomic_state *state, - struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state) +static void intel_update_crtc(struct intel_crtc *crtc, + struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); bool modeset = needs_modeset(new_crtc_state); struct intel_plane_state *new_plane_state = - intel_atomic_get_new_plane_state(to_intel_atomic_state(state), - to_intel_plane(crtc->primary)); + intel_atomic_get_new_plane_state(state, + to_intel_plane(crtc->base.primary)); if (modeset) { - update_scanline_offset(pipe_config); - dev_priv->display.crtc_enable(pipe_config, state); + update_scanline_offset(new_crtc_state); + dev_priv->display.crtc_enable(new_crtc_state, state); /* vblanks work again, re-enable pipe CRC. 
*/ - intel_crtc_enable_pipe_crc(intel_crtc); + intel_crtc_enable_pipe_crc(crtc); } else { - intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), - pipe_config); + intel_pre_plane_update(old_crtc_state, new_crtc_state); - if (pipe_config->update_pipe) - intel_encoders_update_pipe(crtc, pipe_config, state); + if (new_crtc_state->update_pipe) + intel_encoders_update_pipe(crtc, new_crtc_state, state); } - if (pipe_config->update_pipe && !pipe_config->enable_fbc) - intel_fbc_disable(intel_crtc); + if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) + intel_fbc_disable(crtc); else if (new_plane_state) - intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); + intel_fbc_enable(crtc, new_crtc_state, new_plane_state); - intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc); + intel_begin_crtc_commit(state, crtc); if (INTEL_GEN(dev_priv) >= 9) - skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); + skl_update_planes_on_crtc(state, crtc); else - i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); + i9xx_update_planes_on_crtc(state, crtc); - intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc); + intel_finish_crtc_commit(state, crtc); } -static void intel_update_crtcs(struct drm_atomic_state *state) +static void intel_update_crtcs(struct intel_atomic_state *state) { - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; + struct intel_crtc_state *old_crtc_state, *new_crtc_state; int i; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!new_crtc_state->active) + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (!new_crtc_state->base.active) continue; intel_update_crtc(crtc, state, old_crtc_state, @@ -13628,26 +13775,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state) } } -static void skl_update_crtcs(struct drm_atomic_state *state) +static void skl_update_crtcs(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; - struct intel_crtc_state *cstate; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + struct intel_crtc_state *old_crtc_state, *new_crtc_state; unsigned int updated = 0; bool progress; enum pipe pipe; int i; u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; - u8 required_slices = intel_state->wm_results.ddb.enabled_slices; + u8 required_slices = state->wm_results.ddb.enabled_slices; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) /* ignore allocations for crtc's that have been turned off. 
*/ - if (new_crtc_state->active) - entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; + if (new_crtc_state->base.active) + entries[i] = old_crtc_state->wm.skl.ddb; /* If 2nd DBuf slice required, enable it here */ if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) @@ -13662,24 +13806,22 @@ static void skl_update_crtcs(struct drm_atomic_state *state) do { progress = false; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { bool vbl_wait = false; - unsigned int cmask = drm_crtc_mask(crtc); + unsigned int cmask = drm_crtc_mask(&crtc->base); - intel_crtc = to_intel_crtc(crtc); - cstate = to_intel_crtc_state(new_crtc_state); - pipe = intel_crtc->pipe; + pipe = crtc->pipe; - if (updated & cmask || !cstate->base.active) + if (updated & cmask || !new_crtc_state->base.active) continue; - if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb, + if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, entries, INTEL_INFO(dev_priv)->num_pipes, i)) continue; updated |= cmask; - entries[i] = cstate->wm.skl.ddb; + entries[i] = new_crtc_state->wm.skl.ddb; /* * If this is an already active pipe, it's DDB changed, @@ -13687,10 +13829,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state) * then we need to wait for a vblank to pass for the * new ddb allocation to take effect. */ - if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb, - &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) && - !new_crtc_state->active_changed && - intel_state->wm_results.dirty_pipes != updated) + if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, + &old_crtc_state->wm.skl.ddb) && + !new_crtc_state->base.active_changed && + state->wm_results.dirty_pipes != updated) vbl_wait = true; intel_update_crtc(crtc, state, old_crtc_state, @@ -13736,18 +13878,21 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat for (;;) { prepare_to_wait(&intel_state->commit_ready.wait, &wait_fence, TASK_UNINTERRUPTIBLE); - prepare_to_wait(&dev_priv->gpu_error.wait_queue, + prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, + I915_RESET_MODESET), &wait_reset, TASK_UNINTERRUPTIBLE); - if (i915_sw_fence_done(&intel_state->commit_ready) - || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags)) + if (i915_sw_fence_done(&intel_state->commit_ready) || + test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) break; schedule(); } finish_wait(&intel_state->commit_ready.wait, &wait_fence); - finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset); + finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, + I915_RESET_MODESET), + &wait_reset); } static void intel_atomic_cleanup_work(struct work_struct *work) @@ -13763,57 +13908,49 @@ static void intel_atomic_cleanup_work(struct work_struct *work) intel_atomic_helper_free_state(i915); } -static void intel_atomic_commit_tail(struct drm_atomic_state *state) +static void intel_atomic_commit_tail(struct intel_atomic_state *state) { - struct drm_device *dev = state->dev; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc_state *old_crtc_state, *new_crtc_state; - struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; u64 
put_domains[I915_MAX_PIPES] = {}; intel_wakeref_t wakeref = 0; int i; - intel_atomic_commit_fence_wait(intel_state); + intel_atomic_commit_fence_wait(state); - drm_atomic_helper_wait_for_dependencies(state); + drm_atomic_helper_wait_for_dependencies(&state->base); - if (intel_state->modeset) + if (state->modeset) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - old_intel_crtc_state = to_intel_crtc_state(old_crtc_state); - new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); - intel_crtc = to_intel_crtc(crtc); - + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (needs_modeset(new_crtc_state) || - to_intel_crtc_state(new_crtc_state)->update_pipe) { + new_crtc_state->update_pipe) { - put_domains[intel_crtc->pipe] = - modeset_get_crtc_power_domains(crtc, - new_intel_crtc_state); + put_domains[crtc->pipe] = + modeset_get_crtc_power_domains(new_crtc_state); } if (!needs_modeset(new_crtc_state)) continue; - intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state); + intel_pre_plane_update(old_crtc_state, new_crtc_state); - if (old_crtc_state->active) { - intel_crtc_disable_planes(intel_state, intel_crtc); + if (old_crtc_state->base.active) { + intel_crtc_disable_planes(state, crtc); /* * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. */ - intel_crtc_disable_pipe_crc(intel_crtc); + intel_crtc_disable_pipe_crc(crtc); - dev_priv->display.crtc_disable(old_intel_crtc_state, state); - intel_crtc->active = false; - intel_fbc_disable(intel_crtc); - intel_disable_shared_dpll(old_intel_crtc_state); + dev_priv->display.crtc_disable(old_crtc_state, state); + crtc->active = false; + intel_fbc_disable(crtc); + intel_disable_shared_dpll(old_crtc_state); /* * Underruns don't always raise @@ -13823,25 +13960,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_pch_fifo_underruns(dev_priv); /* FIXME unify this for all platforms */ - if (!new_crtc_state->active && + if (!new_crtc_state->base.active && !HAS_GMCH(dev_priv) && dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(intel_state, - new_intel_crtc_state); + dev_priv->display.initial_watermarks(state, + new_crtc_state); } } - /* FIXME: Eventually get rid of our intel_crtc->config pointer */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) - to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state); + /* FIXME: Eventually get rid of our crtc->config pointer */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + crtc->config = new_crtc_state; - if (intel_state->modeset) { - drm_atomic_helper_update_legacy_modeset_state(state->dev, state); + if (state->modeset) { + drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); intel_set_cdclk_pre_plane_update(dev_priv, - &intel_state->cdclk.actual, + &state->cdclk.actual, &dev_priv->cdclk.actual, - intel_state->cdclk.pipe); + state->cdclk.pipe); /* * SKL workaround: bspec recommends we disable the SAGV when we @@ -13850,31 +13987,37 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (!intel_can_enable_sagv(state)) intel_disable_sagv(dev_priv); - intel_modeset_verify_disabled(dev, state); + intel_modeset_verify_disabled(dev_priv, state); } /* Complete the events for pipes that have now been disabled */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + 
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { bool modeset = needs_modeset(new_crtc_state); /* Complete events for now disable pipes here. */ - if (modeset && !new_crtc_state->active && new_crtc_state->event) { + if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) { spin_lock_irq(&dev->event_lock); - drm_crtc_send_vblank_event(crtc, new_crtc_state->event); + drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event); spin_unlock_irq(&dev->event_lock); - new_crtc_state->event = NULL; + new_crtc_state->base.event = NULL; } } + if (state->modeset) + intel_encoders_update_prepare(state); + /* Now enable the clocks, plane, pipe, and connectors that we set up. */ dev_priv->display.update_crtcs(state); - if (intel_state->modeset) + if (state->modeset) { + intel_encoders_update_complete(state); + intel_set_cdclk_post_plane_update(dev_priv, - &intel_state->cdclk.actual, + &state->cdclk.actual, &dev_priv->cdclk.actual, - intel_state->cdclk.pipe); + state->cdclk.pipe); + } /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To @@ -13885,16 +14028,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * - switch over to the vblank wait helper in the core after that since * we don't need out special handling any more. */ - drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_wait_for_flip_done(dev, &state->base); - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); - - if (new_crtc_state->active && + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (new_crtc_state->base.active && !needs_modeset(new_crtc_state) && - (new_intel_crtc_state->base.color_mgmt_changed || - new_intel_crtc_state->update_pipe)) - intel_color_load_luts(new_intel_crtc_state); + (new_crtc_state->base.color_mgmt_changed || + new_crtc_state->update_pipe)) + intel_color_load_luts(new_crtc_state); } /* @@ -13904,16 +14045,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * * TODO: Move this (and other cleanup) to an async worker eventually. 
*/ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); - + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (dev_priv->display.optimize_watermarks) - dev_priv->display.optimize_watermarks(intel_state, - new_intel_crtc_state); + dev_priv->display.optimize_watermarks(state, + new_crtc_state); } - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - intel_post_plane_update(to_intel_crtc_state(old_crtc_state)); + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + intel_post_plane_update(old_crtc_state); if (put_domains[i]) modeset_put_power_domains(dev_priv, put_domains[i]); @@ -13921,15 +14060,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); } - if (intel_state->modeset) - intel_verify_planes(intel_state); + if (state->modeset) + intel_verify_planes(state); - if (intel_state->modeset && intel_can_enable_sagv(state)) + if (state->modeset && intel_can_enable_sagv(state)) intel_enable_sagv(dev_priv); - drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_commit_hw_done(&state->base); - if (intel_state->modeset) { + if (state->modeset) { /* As one of the primary mmio accessors, KMS has a high * likelihood of triggering bugs in unclaimed access. After we * finish modesetting, see if an error has been flagged, and if @@ -13939,7 +14078,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } - intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -13949,14 +14088,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * schedule point (cond_resched()) here anyway to keep latencies * down. 
*/ - INIT_WORK(&state->commit_work, intel_atomic_cleanup_work); - queue_work(system_highpri_wq, &state->commit_work); + INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); + queue_work(system_highpri_wq, &state->base.commit_work); } static void intel_atomic_commit_work(struct work_struct *work) { - struct drm_atomic_state *state = - container_of(work, struct drm_atomic_state, commit_work); + struct intel_atomic_state *state = + container_of(work, struct intel_atomic_state, base.commit_work); intel_atomic_commit_tail(state); } @@ -13986,42 +14125,31 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, return NOTIFY_DONE; } -static void intel_atomic_track_fbs(struct drm_atomic_state *state) +static void intel_atomic_track_fbs(struct intel_atomic_state *state) { - struct drm_plane_state *old_plane_state, *new_plane_state; - struct drm_plane *plane; + struct intel_plane_state *old_plane_state, *new_plane_state; + struct intel_plane *plane; int i; - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) - i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), - intel_fb_obj(new_plane_state->fb), - to_intel_plane(plane)->frontbuffer_bit); + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) + i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb), + intel_fb_obj(new_plane_state->base.fb), + plane->frontbuffer_bit); } -/** - * intel_atomic_commit - commit validated state object - * @dev: DRM device - * @state: the top-level driver state object - * @nonblock: nonblocking commit - * - * This function commits a top-level state object that has been validated - * with drm_atomic_helper_check(). - * - * RETURNS - * Zero for success or -errno. - */ static int intel_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, + struct drm_atomic_state *_state, bool nonblock) { - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct intel_atomic_state *state = to_intel_atomic_state(_state); struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - drm_atomic_state_get(state); - i915_sw_fence_init(&intel_state->commit_ready, + drm_atomic_state_get(&state->base); + i915_sw_fence_init(&state->commit_ready, intel_atomic_commit_ready); /* @@ -14041,63 +14169,61 @@ static int intel_atomic_commit(struct drm_device *dev, * FIXME doing watermarks and fb cleanup from a vblank worker * (assuming we had any) would solve these problems. 
*/ - if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) { + if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; - for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i) + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) if (new_crtc_state->wm.need_postvbl_update || new_crtc_state->update_wm_post) - state->legacy_cursor_update = false; + state->base.legacy_cursor_update = false; } - ret = intel_atomic_prepare_commit(dev, state); + ret = intel_atomic_prepare_commit(state); if (ret) { DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); - i915_sw_fence_commit(&intel_state->commit_ready); - intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); + i915_sw_fence_commit(&state->commit_ready); + intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } - ret = drm_atomic_helper_setup_commit(state, nonblock); + ret = drm_atomic_helper_setup_commit(&state->base, nonblock); if (!ret) - ret = drm_atomic_helper_swap_state(state, true); + ret = drm_atomic_helper_swap_state(&state->base, true); if (ret) { - i915_sw_fence_commit(&intel_state->commit_ready); + i915_sw_fence_commit(&state->commit_ready); - drm_atomic_helper_cleanup_planes(dev, state); - intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); + drm_atomic_helper_cleanup_planes(dev, &state->base); + intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } dev_priv->wm.distrust_bios_wm = false; intel_shared_dpll_swap_state(state); intel_atomic_track_fbs(state); - if (intel_state->modeset) { - memcpy(dev_priv->min_cdclk, intel_state->min_cdclk, - sizeof(intel_state->min_cdclk)); - memcpy(dev_priv->min_voltage_level, - intel_state->min_voltage_level, - sizeof(intel_state->min_voltage_level)); - dev_priv->active_crtcs = intel_state->active_crtcs; - dev_priv->cdclk.force_min_cdclk = - intel_state->cdclk.force_min_cdclk; + if (state->modeset) { + memcpy(dev_priv->min_cdclk, state->min_cdclk, + sizeof(state->min_cdclk)); + memcpy(dev_priv->min_voltage_level, state->min_voltage_level, + sizeof(state->min_voltage_level)); + dev_priv->active_crtcs = state->active_crtcs; + dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; - intel_cdclk_swap_state(intel_state); + intel_cdclk_swap_state(state); } - drm_atomic_state_get(state); - INIT_WORK(&state->commit_work, intel_atomic_commit_work); + drm_atomic_state_get(&state->base); + INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); - i915_sw_fence_commit(&intel_state->commit_ready); - if (nonblock && intel_state->modeset) { - queue_work(dev_priv->modeset_wq, &state->commit_work); + i915_sw_fence_commit(&state->commit_ready); + if (nonblock && state->modeset) { + queue_work(dev_priv->modeset_wq, &state->base.commit_work); } else if (nonblock) { - queue_work(system_unbound_wq, &state->commit_work); + queue_work(system_unbound_wq, &state->base.commit_work); } else { - if (intel_state->modeset) + if (state->modeset) flush_workqueue(dev_priv->modeset_wq); intel_atomic_commit_tail(state); } @@ -14105,18 +14231,6 @@ static int intel_atomic_commit(struct drm_device *dev, return 0; } -static const struct drm_crtc_funcs intel_crtc_funcs = { - .gamma_set = drm_atomic_helper_legacy_gamma_set, - .set_config = drm_atomic_helper_set_config, - .destroy = intel_crtc_destroy, - .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = intel_crtc_duplicate_state, - .atomic_destroy_state = 
intel_crtc_destroy_state, - .set_crc_source = intel_crtc_set_crc_source, - .verify_crc_source = intel_crtc_verify_crc_source, - .get_crc_sources = intel_crtc_get_crc_sources, -}; - struct wait_rps_boost { struct wait_queue_entry wait; @@ -14250,9 +14364,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, int ret; if (old_obj) { - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(new_state->state, - plane->state->crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(intel_state, + to_intel_crtc(plane->state->crtc)); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the @@ -14413,7 +14527,7 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state, intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - bool modeset = needs_modeset(&new_crtc_state->base); + bool modeset = needs_modeset(new_crtc_state); /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); @@ -14466,7 +14580,7 @@ static void intel_finish_crtc_commit(struct intel_atomic_state *state, intel_pipe_update_end(new_crtc_state); if (new_crtc_state->update_pipe && - !needs_modeset(&new_crtc_state->base) && + !needs_modeset(new_crtc_state) && old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } @@ -14580,7 +14694,7 @@ intel_legacy_cursor_update(struct drm_plane *plane, * When crtc is inactive or there is a modeset pending, * wait for it to complete in the slowpath */ - if (!crtc_state->base.active || needs_modeset(&crtc_state->base) || + if (!crtc_state->base.active || needs_modeset(crtc_state) || crtc_state->update_pipe) goto slow; @@ -14910,8 +15024,76 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, scaler_state->scaler_id = -1; } +#define INTEL_CRTC_FUNCS \ + .gamma_set = drm_atomic_helper_legacy_gamma_set, \ + .set_config = drm_atomic_helper_set_config, \ + .destroy = intel_crtc_destroy, \ + .page_flip = drm_atomic_helper_page_flip, \ + .atomic_duplicate_state = intel_crtc_duplicate_state, \ + .atomic_destroy_state = intel_crtc_destroy_state, \ + .set_crc_source = intel_crtc_set_crc_source, \ + .verify_crc_source = intel_crtc_verify_crc_source, \ + .get_crc_sources = intel_crtc_get_crc_sources + +static const struct drm_crtc_funcs bdw_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = g4x_get_vblank_counter, + .enable_vblank = bdw_enable_vblank, + .disable_vblank = bdw_disable_vblank, +}; + +static const struct drm_crtc_funcs ilk_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = g4x_get_vblank_counter, + .enable_vblank = ilk_enable_vblank, + .disable_vblank = ilk_disable_vblank, +}; + +static const struct drm_crtc_funcs g4x_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = g4x_get_vblank_counter, + .enable_vblank = i965_enable_vblank, + .disable_vblank = i965_disable_vblank, +}; + +static const struct drm_crtc_funcs i965_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i965_enable_vblank, + .disable_vblank = i965_disable_vblank, +}; + +static const struct drm_crtc_funcs i945gm_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i945gm_enable_vblank, + .disable_vblank = i945gm_disable_vblank, +}; + +static const struct drm_crtc_funcs i915_crtc_funcs = { + INTEL_CRTC_FUNCS, + + 
.get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i8xx_enable_vblank, + .disable_vblank = i8xx_disable_vblank, +}; + +static const struct drm_crtc_funcs i8xx_crtc_funcs = { + INTEL_CRTC_FUNCS, + + /* no hw vblank counter */ + .enable_vblank = i8xx_enable_vblank, + .disable_vblank = i8xx_disable_vblank, +}; + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { + const struct drm_crtc_funcs *funcs; struct intel_crtc *intel_crtc; struct intel_crtc_state *crtc_state = NULL; struct intel_plane *primary = NULL; @@ -14955,10 +15137,28 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) } intel_crtc->plane_ids_mask |= BIT(cursor->id); + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) + funcs = &g4x_crtc_funcs; + else if (IS_GEN(dev_priv, 4)) + funcs = &i965_crtc_funcs; + else if (IS_I945GM(dev_priv)) + funcs = &i945gm_crtc_funcs; + else if (IS_GEN(dev_priv, 3)) + funcs = &i915_crtc_funcs; + else + funcs = &i8xx_crtc_funcs; + } else { + if (INTEL_GEN(dev_priv) >= 8) + funcs = &bdw_crtc_funcs; + else + funcs = &ilk_crtc_funcs; + } + ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, &primary->base, &cursor->base, - &intel_crtc_funcs, - "pipe %c", pipe_name(pipe)); + funcs, "pipe %c", pipe_name(pipe)); if (ret) goto fail; @@ -15114,12 +15314,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_ELKHARTLAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 12) { + /* TODO: initialize TC ports as well */ + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_C); + } else if (IS_ELKHARTLAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); + intel_ddi_init(dev_priv, PORT_D); icl_dsi_init(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 11) { + } else if (IS_GEN(dev_priv, 11)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); @@ -15775,8 +15981,8 @@ static void sanitize_watermarks(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state; struct intel_atomic_state *intel_state; - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; struct drm_modeset_acquire_ctx ctx; int ret; int i; @@ -15831,13 +16037,11 @@ retry: } /* Write calculated watermark values back */ - for_each_new_crtc_in_state(state, crtc, cstate, i) { - struct intel_crtc_state *cs = to_intel_crtc_state(cstate); - - cs->wm.need_postvbl_update = true; - dev_priv->display.optimize_watermarks(intel_state, cs); + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + crtc_state->wm.need_postvbl_update = true; + dev_priv->display.optimize_watermarks(intel_state, crtc_state); - to_intel_crtc_state(crtc->state)->wm = cs->wm; + to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; } put_state: @@ -16495,6 +16699,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, &pll->state.hw_state); + + if (IS_ELKHARTLAKE(dev_priv) && pll->on && + pll->info->id == DPLL_ID_EHL_DPLL4) { + pll->wakeref = intel_display_power_get(dev_priv, + POWER_DOMAIN_DPLL_DC_OFF); + } + pll->state.crtc_mask = 0; for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = @@ -16744,6 +16955,17 @@ 
intel_modeset_setup_hw_state(struct drm_device *dev, intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. */ + + /* Sanitize the TypeC port mode upfront, encoders depend on this */ + for_each_intel_encoder(dev, encoder) { + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + + /* We need to sanitize only the MST primary port. */ + if (encoder->type != INTEL_OUTPUT_DP_MST && + intel_phy_is_tc(dev_priv, phy)) + intel_tc_port_sanitize(enc_to_dig_port(&encoder->base)); + } + get_encoder_power_domains(dev_priv); if (HAS_PCH_IBX(dev_priv)) @@ -16804,7 +17026,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, u64 put_domains; crtc_state = to_intel_crtc_state(crtc->base.state); - put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state); + put_domains = modeset_get_crtc_power_domains(crtc_state); if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } @@ -16866,7 +17088,7 @@ static void intel_hpd_poll_fini(struct drm_device *dev) drm_connector_list_iter_end(&conn_iter); } -void intel_modeset_cleanup(struct drm_device *dev) +void intel_modeset_driver_remove(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -16982,7 +17204,7 @@ struct intel_display_error_state { u32 vtotal; u32 vblank; u32 vsync; - } transcoder[4]; + } transcoder[5]; }; struct intel_display_error_state * @@ -16993,6 +17215,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, + TRANSCODER_D, TRANSCODER_EDP, }; int i; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index ee6b8194a459..d2c718f25478 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -45,6 +45,8 @@ enum i915_gpio { GPIOK, GPIOL, GPIOM, + GPION, + GPIOO, }; /* @@ -58,6 +60,7 @@ enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, + PIPE_D, _PIPE_EDP, I915_MAX_PIPES = _PIPE_EDP @@ -75,6 +78,7 @@ enum transcoder { TRANSCODER_A = PIPE_A, TRANSCODER_B = PIPE_B, TRANSCODER_C = PIPE_C, + TRANSCODER_D = PIPE_D, /* * The following transcoders can map to any pipe, their enum value @@ -98,6 +102,8 @@ static inline const char *transcoder_name(enum transcoder transcoder) return "B"; case TRANSCODER_C: return "C"; + case TRANSCODER_D: + return "D"; case TRANSCODER_EDP: return "EDP"; case TRANSCODER_DSI_A: @@ -173,6 +179,12 @@ static inline const char *port_identifier(enum port port) return "Port E"; case PORT_F: return "Port F"; + case PORT_G: + return "Port G"; + case PORT_H: + return "Port H"; + case PORT_I: + return "Port I"; default: return "<invalid>"; } @@ -185,14 +197,15 @@ enum tc_port { PORT_TC2, PORT_TC3, PORT_TC4, + PORT_TC5, + PORT_TC6, I915_MAX_TC_PORTS }; -enum tc_port_type { - TC_PORT_UNKNOWN = 0, - TC_PORT_TYPEC, - TC_PORT_TBT, +enum tc_port_mode { + TC_PORT_TBT_ALT, + TC_PORT_DP_ALT, TC_PORT_LEGACY, }; @@ -229,6 +242,30 @@ struct intel_link_m_n { u32 link_n; }; +enum phy { + PHY_NONE = -1, + + PHY_A = 0, + PHY_B, + PHY_C, + PHY_D, + PHY_E, + PHY_F, + PHY_G, + PHY_H, + PHY_I, + + I915_MAX_PHYS +}; + +#define phy_name(a) ((a) + 'A') + +enum phy_fia { + FIA1, + FIA2, + FIA3, +}; + #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) @@ -254,6 +291,10 @@ struct intel_link_m_n { for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ for_each_if((__ports_mask) & BIT(__port)) +#define for_each_phy_masked(__phy, __phys_mask) 
\ + for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \ + for_each_if((__phys_mask) & BIT(__phy)) + #define for_each_crtc(dev, crtc) \ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) @@ -357,5 +398,6 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); bool intel_plane_can_remap(const struct intel_plane_state *plane_state); +enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c93ad512014c..dd2a50b8ba0a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -17,13 +17,17 @@ #include "intel_drv.h" #include "intel_hotplug.h" #include "intel_sideband.h" +#include "intel_tc.h" bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); const char * -intel_display_power_domain_str(enum intel_display_power_domain domain) +intel_display_power_domain_str(struct drm_i915_private *i915, + enum intel_display_power_domain domain) { + bool ddi_tc_ports = IS_GEN(i915, 12); + switch (domain) { case POWER_DOMAIN_DISPLAY_CORE: return "DISPLAY_CORE"; @@ -33,22 +37,28 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "PIPE_B"; case POWER_DOMAIN_PIPE_C: return "PIPE_C"; + case POWER_DOMAIN_PIPE_D: + return "PIPE_D"; case POWER_DOMAIN_PIPE_A_PANEL_FITTER: return "PIPE_A_PANEL_FITTER"; case POWER_DOMAIN_PIPE_B_PANEL_FITTER: return "PIPE_B_PANEL_FITTER"; case POWER_DOMAIN_PIPE_C_PANEL_FITTER: return "PIPE_C_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_D_PANEL_FITTER: + return "PIPE_D_PANEL_FITTER"; case POWER_DOMAIN_TRANSCODER_A: return "TRANSCODER_A"; case POWER_DOMAIN_TRANSCODER_B: return "TRANSCODER_B"; case POWER_DOMAIN_TRANSCODER_C: return "TRANSCODER_C"; + case POWER_DOMAIN_TRANSCODER_D: + return "TRANSCODER_D"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; - case POWER_DOMAIN_TRANSCODER_EDP_VDSC: - return "TRANSCODER_EDP_VDSC"; + case POWER_DOMAIN_TRANSCODER_VDSC_PW2: + return "TRANSCODER_VDSC_PW2"; case POWER_DOMAIN_TRANSCODER_DSI_A: return "TRANSCODER_DSI_A"; case POWER_DOMAIN_TRANSCODER_DSI_C: @@ -60,11 +70,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) case POWER_DOMAIN_PORT_DDI_C_LANES: return "PORT_DDI_C_LANES"; case POWER_DOMAIN_PORT_DDI_D_LANES: - return "PORT_DDI_D_LANES"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES != + POWER_DOMAIN_PORT_DDI_TC1_LANES); + return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES"; case POWER_DOMAIN_PORT_DDI_E_LANES: - return "PORT_DDI_E_LANES"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES != + POWER_DOMAIN_PORT_DDI_TC2_LANES); + return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES"; case POWER_DOMAIN_PORT_DDI_F_LANES: - return "PORT_DDI_F_LANES"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES != + POWER_DOMAIN_PORT_DDI_TC3_LANES); + return ddi_tc_ports ? 
"PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES"; + case POWER_DOMAIN_PORT_DDI_TC4_LANES: + return "PORT_DDI_TC4_LANES"; + case POWER_DOMAIN_PORT_DDI_TC5_LANES: + return "PORT_DDI_TC5_LANES"; + case POWER_DOMAIN_PORT_DDI_TC6_LANES: + return "PORT_DDI_TC6_LANES"; case POWER_DOMAIN_PORT_DDI_A_IO: return "PORT_DDI_A_IO"; case POWER_DOMAIN_PORT_DDI_B_IO: @@ -72,11 +94,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) case POWER_DOMAIN_PORT_DDI_C_IO: return "PORT_DDI_C_IO"; case POWER_DOMAIN_PORT_DDI_D_IO: - return "PORT_DDI_D_IO"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO != + POWER_DOMAIN_PORT_DDI_TC1_IO); + return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO"; case POWER_DOMAIN_PORT_DDI_E_IO: - return "PORT_DDI_E_IO"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO != + POWER_DOMAIN_PORT_DDI_TC2_IO); + return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO"; case POWER_DOMAIN_PORT_DDI_F_IO: - return "PORT_DDI_F_IO"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO != + POWER_DOMAIN_PORT_DDI_TC3_IO); + return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO"; + case POWER_DOMAIN_PORT_DDI_TC4_IO: + return "PORT_DDI_TC4_IO"; + case POWER_DOMAIN_PORT_DDI_TC5_IO: + return "PORT_DDI_TC5_IO"; + case POWER_DOMAIN_PORT_DDI_TC6_IO: + return "PORT_DDI_TC6_IO"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -94,11 +128,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) case POWER_DOMAIN_AUX_C: return "AUX_C"; case POWER_DOMAIN_AUX_D: - return "AUX_D"; + BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1); + return ddi_tc_ports ? "AUX_TC1" : "AUX_D"; case POWER_DOMAIN_AUX_E: - return "AUX_E"; + BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2); + return ddi_tc_ports ? "AUX_TC2" : "AUX_E"; case POWER_DOMAIN_AUX_F: - return "AUX_F"; + BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3); + return ddi_tc_ports ? "AUX_TC3" : "AUX_F"; + case POWER_DOMAIN_AUX_TC4: + return "AUX_TC4"; + case POWER_DOMAIN_AUX_TC5: + return "AUX_TC5"; + case POWER_DOMAIN_AUX_TC6: + return "AUX_TC6"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; case POWER_DOMAIN_AUX_TBT1: @@ -109,6 +152,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_TBT3"; case POWER_DOMAIN_AUX_TBT4: return "AUX_TBT4"; + case POWER_DOMAIN_AUX_TBT5: + return "AUX_TBT5"; + case POWER_DOMAIN_AUX_TBT6: + return "AUX_TBT6"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: @@ -117,6 +164,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "MODESET"; case POWER_DOMAIN_GT_IRQ: return "GT_IRQ"; + case POWER_DOMAIN_DPLL_DC_OFF: + return "DPLL_DC_OFF"; default: MISSING_CASE(domain); return "?"; @@ -269,11 +318,17 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, int pw_idx = power_well->desc->hsw.idx; /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ - WARN_ON(intel_wait_for_register(&dev_priv->uncore, - regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), - HSW_PWR_WELL_CTL_STATE(pw_idx), - 1)); + if (intel_wait_for_register(&dev_priv->uncore, + regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), + HSW_PWR_WELL_CTL_STATE(pw_idx), + 1)) { + DRM_DEBUG_KMS("%s power well enable timeout\n", + power_well->desc->name); + + /* An AUX timeout is expected if the TBT DP tunnel is down. 
*/ + WARN_ON(!power_well->desc->hsw.is_tc_tbt); + } } static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, @@ -388,7 +443,7 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_disable(dev_priv, power_well); } -#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) +#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, @@ -396,21 +451,29 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; - enum port port = ICL_AUX_PW_TO_PORT(pw_idx); + enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; + int wa_idx_max; val = I915_READ(regs->driver); I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - val = I915_READ(ICL_PORT_CL_DW12(port)); - I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); + if (INTEL_GEN(dev_priv) < 12) { + val = I915_READ(ICL_PORT_CL_DW12(phy)); + I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX); + } hsw_wait_for_power_well_enable(dev_priv, power_well); - /* Display WA #1178: icl */ - if (IS_ICELAKE(dev_priv) && - pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_bios_is_port_edp(dev_priv, port)) { + /* Display WA #1178: icl, tgl */ + if (IS_TIGERLAKE(dev_priv)) + wa_idx_max = ICL_PW_CTL_IDX_AUX_C; + else + wa_idx_max = ICL_PW_CTL_IDX_AUX_B; + + if (!IS_ELKHARTLAKE(dev_priv) && + pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max && + !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); @@ -423,11 +486,13 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; - enum port port = ICL_AUX_PW_TO_PORT(pw_idx); + enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; - val = I915_READ(ICL_PORT_CL_DW12(port)); - I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); + if (INTEL_GEN(dev_priv) < 12) { + val = I915_READ(ICL_PORT_CL_DW12(phy)); + I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); + } val = I915_READ(regs->driver); I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); @@ -438,13 +503,93 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, #define ICL_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) +#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) + +static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->hsw.idx; + + return power_well->desc->hsw.is_tc_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +static u64 async_put_domains_mask(struct i915_power_domains *power_domains); + +static int power_well_async_ref_count(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int refs = hweight64(power_well->desc->domains & + async_put_domains_mask(&dev_priv->power_domains)); + + WARN_ON(refs > power_well->count); + + return refs; +} + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); + struct intel_digital_port *dig_port = NULL; + struct intel_encoder *encoder; + + /* Bypass the check if all references are released asynchronously */ + if (power_well_async_ref_count(dev_priv, power_well) == + power_well->count) + return; + + aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); + + for_each_intel_encoder(&dev_priv->drm, encoder) { + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + + if (!intel_phy_is_tc(dev_priv, phy)) + continue; + + /* We'll check the MST primary port */ + if (encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + dig_port = enc_to_dig_port(&encoder->base); + if (WARN_ON(!dig_port)) + continue; + + if (dig_port->aux_ch != aux_ch) { + dig_port = NULL; + continue; + } + + break; + } + + if (WARN_ON(!dig_port)) + return; + + WARN_ON(!intel_tc_port_ref_held(dig_port)); +} + +#else + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +#endif + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); + enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); u32 val; + icl_tc_port_assert_ref_held(dev_priv, power_well); + val = I915_READ(DP_AUX_CH_CTL(aux_ch)); val &= ~DP_AUX_CH_CTL_TBT_IO; if (power_well->desc->hsw.is_tc_tbt) @@ -454,6 +599,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, hsw_power_well_enable(dev_priv, power_well); } +static void +icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + icl_tc_port_assert_ref_held(dev_priv, power_well); + + hsw_power_well_disable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -1064,7 +1218,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); intel_power_sequencer_reset(dev_priv); @@ -1568,12 +1722,15 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) static void print_power_domains(struct i915_power_domains *power_domains, const char *prefix, u64 mask) { + struct drm_i915_private *i915 = + container_of(power_domains, struct drm_i915_private, + power_domains); enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); for_each_power_domain(domain, mask) DRM_DEBUG_DRIVER("%s use_count %d\n", - intel_display_power_domain_str(domain), + intel_display_power_domain_str(i915, domain), power_domains->domain_use_count[domain]); } @@ -1743,7 +1900,7 @@ __intel_display_power_put_domain(struct 
drm_i915_private *dev_priv, { struct i915_power_domains *power_domains; struct i915_power_well *power_well; - const char *name = intel_display_power_domain_str(domain); + const char *name = intel_display_power_domain_str(dev_priv, domain); power_domains = &dev_priv->power_domains; @@ -2352,7 +2509,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, */ #define ICL_PW_2_POWER_DOMAINS ( \ ICL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ BIT_ULL(POWER_DOMAIN_INIT)) /* * - KVMR (HW control) @@ -2361,6 +2518,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, ICL_PW_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define ICL_DDI_IO_A_POWER_DOMAINS ( \ @@ -2398,6 +2556,93 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, #define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_TBT4)) +#define TGL_PW_5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_D) | \ + BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_4_POWER_DOMAINS ( \ + TGL_PW_5_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_3_POWER_DOMAINS ( \ + TGL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC6) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_2_POWER_DOMAINS ( \ + TGL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + TGL_PW_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO)) +#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO)) +#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO)) +#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO)) +#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO)) +#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO)) + +#define 
TGL_AUX_TC1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC1)) +#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC2)) +#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC3)) +#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC4)) +#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC5)) +#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC6)) +#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5)) +#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6)) + static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = i9xx_always_on_power_well_noop, @@ -3106,7 +3351,7 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { .sync_hw = hsw_power_well_sync_hw, .enable = icl_tc_phy_aux_power_well_enable, - .disable = hsw_power_well_disable, + .disable = icl_tc_phy_aux_power_well_disable, .is_enabled = hsw_power_well_enabled, }; @@ -3355,6 +3600,335 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; +static const struct i915_power_well_desc tgl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", + .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 2", + .domains = TGL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, + { + .name = "power well 3", + .domains = TGL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + } + }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + } + }, + { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + } + }, + { + .name = "DDI TC1 IO", + .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, + }, + }, + { + .name = "DDI TC2 IO", + .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, + }, + }, + { + .name = "DDI TC3 IO", + .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + 
{ + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, + }, + }, + { + .name = "DDI TC4 IO", + .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, + }, + }, + { + .name = "DDI TC5 IO", + .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, + }, + }, + { + .name = "DDI TC6 IO", + .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, + }, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX C", + .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + }, + }, + { + .name = "AUX TC1", + .domains = TGL_AUX_TC1_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC2", + .domains = TGL_AUX_TC2_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC3", + .domains = TGL_AUX_TC3_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC4", + .domains = TGL_AUX_TC4_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC5", + .domains = TGL_AUX_TC5_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC6", + .domains = TGL_AUX_TC6_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TBT1", + .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT2", + .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT3", + .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = 
&icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT4", + .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT5", + .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT6", + .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "power well 4", + .domains = TGL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + } + }, + { + .name = "power well 5", + .domains = TGL_PW_5_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_PW_5, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_D), + }, + }, +}; + static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) @@ -3482,7 +4056,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. */ - if (IS_GEN(dev_priv, 11)) { + if (IS_GEN(dev_priv, 12)) { + err = set_power_wells(power_domains, tgl_power_wells); + } else if (IS_GEN(dev_priv, 11)) { err = set_power_wells(power_domains, icl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { err = set_power_wells(power_domains, cnl_power_wells); @@ -4330,7 +4906,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); * * It will return with power domains disabled (to be enabled later by * intel_power_domains_enable()) and must be paired with - * intel_power_domains_fini_hw(). + * intel_power_domains_driver_remove(). */ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) { @@ -4382,7 +4958,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) } /** - * intel_power_domains_fini_hw - deinitialize hw power domain state + * intel_power_domains_driver_remove - deinitialize hw power domain state * @i915: i915 device instance * * De-initializes the display power domain HW state. It also ensures that the @@ -4392,7 +4968,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) * intel_power_domains_disable()) and must be paired with * intel_power_domains_init_hw(). 
*/ -void intel_power_domains_fini_hw(struct drm_i915_private *i915) +void intel_power_domains_driver_remove(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&i915->power_domains.wakeref); @@ -4546,7 +5122,8 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) for_each_power_domain(domain, power_well->desc->domains) DRM_DEBUG_DRIVER(" %-23s %d\n", - intel_display_power_domain_str(domain), + intel_display_power_domain_str(i915, + domain), power_domains->domain_use_count[domain]); } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index ff57b0a7fe59..e4d2c1ba24b0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -18,28 +18,47 @@ enum intel_display_power_domain { POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_D, POWER_DOMAIN_PIPE_A_PANEL_FITTER, POWER_DOMAIN_PIPE_B_PANEL_FITTER, POWER_DOMAIN_PIPE_C_PANEL_FITTER, + POWER_DOMAIN_PIPE_D_PANEL_FITTER, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_TRANSCODER_D, POWER_DOMAIN_TRANSCODER_EDP, - POWER_DOMAIN_TRANSCODER_EDP_VDSC, + /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */ + POWER_DOMAIN_TRANSCODER_VDSC_PW2, POWER_DOMAIN_TRANSCODER_DSI_A, POWER_DOMAIN_TRANSCODER_DSI_C, POWER_DOMAIN_PORT_DDI_A_LANES, POWER_DOMAIN_PORT_DDI_B_LANES, POWER_DOMAIN_PORT_DDI_C_LANES, POWER_DOMAIN_PORT_DDI_D_LANES, + POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES, POWER_DOMAIN_PORT_DDI_E_LANES, + POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES, POWER_DOMAIN_PORT_DDI_F_LANES, + POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES, + POWER_DOMAIN_PORT_DDI_TC4_LANES, + POWER_DOMAIN_PORT_DDI_TC5_LANES, + POWER_DOMAIN_PORT_DDI_TC6_LANES, POWER_DOMAIN_PORT_DDI_A_IO, POWER_DOMAIN_PORT_DDI_B_IO, POWER_DOMAIN_PORT_DDI_C_IO, POWER_DOMAIN_PORT_DDI_D_IO, + POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO, POWER_DOMAIN_PORT_DDI_E_IO, + POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO, POWER_DOMAIN_PORT_DDI_F_IO, + POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO, + POWER_DOMAIN_PORT_DDI_G_IO, + POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO, + POWER_DOMAIN_PORT_DDI_H_IO, + POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO, + POWER_DOMAIN_PORT_DDI_I_IO, + POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -49,16 +68,25 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, + POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_E, + POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, + POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F, + POWER_DOMAIN_AUX_TC4, + POWER_DOMAIN_AUX_TC5, + POWER_DOMAIN_AUX_TC6, POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_TBT1, POWER_DOMAIN_AUX_TBT2, POWER_DOMAIN_AUX_TBT3, POWER_DOMAIN_AUX_TBT4, + POWER_DOMAIN_AUX_TBT5, + POWER_DOMAIN_AUX_TBT6, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_DPLL_DC_OFF, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, @@ -213,7 +241,7 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv); int intel_power_domains_init(struct drm_i915_private *dev_priv); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private 
*dev_priv, bool resume); -void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); +void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv); void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume); void icl_display_core_uninit(struct drm_i915_private *dev_priv); void intel_power_domains_enable(struct drm_i915_private *dev_priv); @@ -227,7 +255,8 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); const char * -intel_display_power_domain_str(enum intel_display_power_domain domain); +intel_display_power_domain_str(struct drm_i915_private *i915, + enum intel_display_power_domain domain); bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4336df46fe78..0eb5d66f87a7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -62,6 +62,7 @@ #include "intel_panel.h" #include "intel_psr.h" #include "intel_sideband.h" +#include "intel_tc.h" #include "intel_vdsc.h" #define DP_DPRX_ESI_LEN 14 @@ -211,46 +212,13 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) return intel_dp->common_rates[intel_dp->num_common_rates - 1]; } -static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - intel_wakeref_t wakeref; - u32 lane_info; - - if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC) - return 4; - - lane_info = 0; - with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) - lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & - DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); - - switch (lane_info) { - default: - MISSING_CASE(lane_info); - case 1: - case 2: - case 4: - case 8: - return 1; - case 3: - case 12: - return 2; - case 15: - return 4; - } -} - /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); int source_max = intel_dig_port->max_lanes; int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); - int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp); + int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port); return min3(source_max, sink_max, fia_max); } @@ -329,9 +297,9 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_port_is_combophy(dev_priv, port) && + if (intel_phy_is_combo(dev_priv, phy) && !IS_ELKHARTLAKE(dev_priv) && !intel_dp_is_edp(intel_dp)) return 540000; @@ -1208,7 +1176,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); - if (intel_dig_port->tc_type == TC_PORT_TBT) + if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) ret |= DP_AUX_CH_CTL_TBT_IO; return ret; @@ -1224,6 +1192,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, struct drm_i915_private *i915 = 
to_i915(intel_dig_port->base.base.dev); struct intel_uncore *uncore = &i915->uncore; + enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); + bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; enum intel_display_power_domain aux_domain = @@ -1239,6 +1209,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); + if (is_tc_port) + intel_tc_port_lock(intel_dig_port); + aux_wakeref = intel_display_power_get(i915, aux_domain); pps_wakeref = pps_lock(intel_dp); @@ -1391,6 +1364,9 @@ out: pps_unlock(intel_dp, pps_wakeref); intel_display_power_put_async(i915, aux_domain, aux_wakeref); + if (is_tc_port) + intel_tc_port_unlock(intel_dig_port); + return ret; } @@ -1878,8 +1854,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, int mode_rate, link_clock, link_avail; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + int output_bpp = intel_dp_output_bpp(pipe_config, bpp); + mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, - bpp); + output_bpp); for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { for (lane_count = limits->min_lane_count; @@ -4243,8 +4221,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (!intel_dp_read_dpcd(intel_dp)) return false; - /* Don't clobber cached eDP rates. */ + /* + * Don't clobber cached eDP rates. Also skip re-reading + * the OUI/ID since we know it won't change. + */ if (!intel_dp_is_edp(intel_dp)) { + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, + drm_dp_is_branch(intel_dp->dpcd)); + intel_dp_set_sink_rates(intel_dp); intel_dp_set_common_rates(intel_dp); } @@ -4253,7 +4237,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) * Some eDP panels do not set a valid value for sink count, that is why * it don't care about read it here and in intel_edp_init_dpcd(). */ - if (!intel_dp_is_edp(intel_dp)) { + if (!intel_dp_is_edp(intel_dp) && + !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) { u8 count; ssize_t r; @@ -4878,14 +4863,16 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, * retrain the link to get a picture. That's in case no * userspace component reacted to intermittent HPD dip. */ -static bool intel_dp_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +static enum intel_hotplug_state +intel_dp_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { struct drm_modeset_acquire_ctx ctx; - bool changed; + enum intel_hotplug_state state; int ret; - changed = intel_encoder_hotplug(encoder, connector); + state = intel_encoder_hotplug(encoder, connector, irq_received); drm_modeset_acquire_init(&ctx, 0); @@ -4904,7 +4891,14 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder, drm_modeset_acquire_fini(&ctx); WARN(ret, "Acquiring modeset locks failed with %i\n", ret); - return changed; + /* + * Keeping it consistent with intel_ddi_hotplug() and + * intel_hdmi_hotplug(). 
+ */ + if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) + state = INTEL_HOTPLUG_RETRY; + + return state; } static void intel_dp_check_service_irq(struct intel_dp *intel_dp) @@ -5232,204 +5226,16 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); } -static const char *tc_type_name(enum tc_port_type type) -{ - static const char * const names[] = { - [TC_PORT_UNKNOWN] = "unknown", - [TC_PORT_LEGACY] = "legacy", - [TC_PORT_TYPEC] = "typec", - [TC_PORT_TBT] = "tbt", - }; - - if (WARN_ON(type >= ARRAY_SIZE(names))) - type = TC_PORT_UNKNOWN; - - return names[type]; -} - -static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, - struct intel_digital_port *intel_dig_port, - bool is_legacy, bool is_typec, bool is_tbt) -{ - enum port port = intel_dig_port->base.port; - enum tc_port_type old_type = intel_dig_port->tc_type; - - WARN_ON(is_legacy + is_typec + is_tbt != 1); - - if (is_legacy) - intel_dig_port->tc_type = TC_PORT_LEGACY; - else if (is_typec) - intel_dig_port->tc_type = TC_PORT_TYPEC; - else if (is_tbt) - intel_dig_port->tc_type = TC_PORT_TBT; - else - return; - - /* Types are not supposed to be changed at runtime. */ - WARN_ON(old_type != TC_PORT_UNKNOWN && - old_type != intel_dig_port->tc_type); - - if (old_type != intel_dig_port->tc_type) - DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), - tc_type_name(intel_dig_port->tc_type)); -} - -/* - * This function implements the first part of the Connect Flow described by our - * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading - * lanes, EDID, etc) is done as needed in the typical places. - * - * Unlike the other ports, type-C ports are not available to use as soon as we - * get a hotplug. The type-C PHYs can be shared between multiple controllers: - * display, USB, etc. As a result, handshaking through FIA is required around - * connect and disconnect to cleanly transfer ownership with the controller and - * set the type-C power state. - * - * We could opt to only do the connect flow when we actually try to use the AUX - * channels or do a modeset, then immediately run the disconnect flow after - * usage, but there are some implications on this for a dynamic environment: - * things may go away or change behind our backs. So for now our driver is - * always trying to acquire ownership of the controller as soon as it gets an - * interrupt (or polls state and sees a port is connected) and only gives it - * back when it sees a disconnect. Implementation of a more fine-grained model - * will require a lot of coordination with user space and thorough testing for - * the extra possible cases. - */ -static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port) -{ - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - u32 val; - - if (dig_port->tc_type != TC_PORT_LEGACY && - dig_port->tc_type != TC_PORT_TYPEC) - return true; - - val = I915_READ(PORT_TX_DFLEXDPPMS); - if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { - DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); - WARN_ON(dig_port->tc_legacy_port); - return false; - } - - /* - * This function may be called many times in a row without an HPD event - * in between, so try to avoid the write when we can. 
- */ - val = I915_READ(PORT_TX_DFLEXDPCSSS); - if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) { - val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); - } - - /* - * Now we have to re-check the live state, in case the port recently - * became disconnected. Not necessary for legacy mode. - */ - if (dig_port->tc_type == TC_PORT_TYPEC && - !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { - DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); - icl_tc_phy_disconnect(dev_priv, dig_port); - return false; - } - - return true; -} - -/* - * See the comment at the connect function. This implements the Disconnect - * Flow. - */ -void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port) -{ - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - - if (dig_port->tc_type == TC_PORT_UNKNOWN) - return; - - /* - * TBT disconnection flow is read the live status, what was done in - * caller. - */ - if (dig_port->tc_type == TC_PORT_TYPEC || - dig_port->tc_type == TC_PORT_LEGACY) { - u32 val; - - val = I915_READ(PORT_TX_DFLEXDPCSSS); - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); - } - - DRM_DEBUG_KMS("Port %c TC type %s disconnected\n", - port_name(dig_port->base.port), - tc_type_name(dig_port->tc_type)); - - dig_port->tc_type = TC_PORT_UNKNOWN; -} - -/* - * The type-C ports are different because even when they are connected, they may - * not be available/usable by the graphics driver: see the comment on - * icl_tc_phy_connect(). So in our driver instead of adding the additional - * concept of "usable" and make everything check for "connected and usable" we - * define a port as "connected" when it is not only connected, but also when it - * is usable by the rest of the driver. That maintains the old assumption that - * connected ports are usable, and avoids exposing to the users objects they - * can't really use. - */ -static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, - struct intel_digital_port *intel_dig_port) -{ - enum port port = intel_dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - bool is_legacy, is_typec, is_tbt; - u32 dpsp; - - /* - * Complain if we got a legacy port HPD, but VBT didn't mark the port as - * legacy. Treat the port as legacy from now on. - */ - if (!intel_dig_port->tc_legacy_port && - I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) { - DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n", - port_name(port)); - intel_dig_port->tc_legacy_port = true; - } - is_legacy = intel_dig_port->tc_legacy_port; - - /* - * The spec says we shouldn't be using the ISR bits for detecting - * between TC and TBT. We should use DFLEXDPSP. 
- */ - dpsp = I915_READ(PORT_TX_DFLEXDPSP); - is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); - is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); - - if (!is_legacy && !is_typec && !is_tbt) { - icl_tc_phy_disconnect(dev_priv, intel_dig_port); - - return false; - } - - icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec, - is_tbt); - - if (!icl_tc_phy_connect(dev_priv, intel_dig_port)) - return false; - - return true; -} - static bool icl_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (intel_port_is_combophy(dev_priv, encoder->port)) + if (intel_phy_is_combo(dev_priv, phy)) return icl_combo_port_connected(dev_priv, dig_port); - else if (intel_port_is_tc(dev_priv, encoder->port)) - return icl_tc_port_connected(dev_priv, dig_port); + else if (intel_phy_is_tc(dev_priv, phy)) + return intel_tc_port_connected(dig_port); else MISSING_CASE(encoder->hpd_pin); @@ -5587,9 +5393,6 @@ intel_dp_detect(struct drm_connector *connector, if (INTEL_GEN(dev_priv) >= 11) intel_dp_get_dsc_sink_cap(intel_dp); - drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, - drm_dp_is_branch(intel_dp->dpcd)); - intel_dp_configure_mst(intel_dp); if (intel_dp->is_mst) { @@ -6834,8 +6637,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state, int refresh_rate) { - struct intel_encoder *encoder; - struct intel_digital_port *dig_port = NULL; struct intel_dp *intel_dp = dev_priv->drrs.dp; struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); enum drrs_refresh_rate_type index = DRRS_HIGH_RR; @@ -6850,9 +6651,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, return; } - dig_port = dp_to_dig_port(intel_dp); - encoder = &dig_port->base; - if (!intel_crtc) { DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); return; @@ -7332,6 +7130,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); int type; /* Initialize the work for modeset in case of link train failure */ @@ -7358,7 +7157,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, * Currently we don't support eDP on TypeC ports, although in * theory it could work on TypeC legacy ports. 
*/ - WARN_ON(intel_port_is_tc(dev_priv, port)); + WARN_ON(intel_phy_is_tc(dev_priv, phy)); type = DRM_MODE_CONNECTOR_eDP; } else { type = DRM_MODE_CONNECTOR_DisplayPort; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index da70b1a41c83..657bbb1f5ed0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -112,8 +112,6 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_max_data_rate(int max_link_clock, int max_lanes); bool intel_digital_port_connected(struct intel_encoder *encoder); -void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 7ded95a334db..6b0b73479fb8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -264,8 +264,11 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector) int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; + struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev); - if (!i915_modparams.enable_dpcd_backlight) + if (i915_modparams.enable_dpcd_backlight == 0 || + (i915_modparams.enable_dpcd_backlight == -1 && + dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)) return -ENODEV; if (!intel_dp_aux_display_control_capable(intel_connector)) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index 1470c6e0514b..6754c211205a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -6,9 +6,15 @@ #ifndef __INTEL_DP_MST_H__ #define __INTEL_DP_MST_H__ -struct intel_digital_port; +#include "intel_drv.h" int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); +static inline int +intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) +{ + return intel_dig_port->dp.active_mst_links; +} + #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 2d4e7b9a7b9d..f9bdf8514a53 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -36,9 +36,10 @@ * This file provides an abstraction over display PLLs. The function * intel_shared_dpll_init() initializes the PLLs for the given platform. The * users of a PLL are tracked and that tracking is integrated with the atomic - * modest interface. During an atomic operation, a PLL can be requested for a - * given CRTC and encoder configuration by calling intel_get_shared_dpll() and - * a previously used PLL can be released with intel_release_shared_dpll(). + * modset interface. During an atomic operation, required PLLs can be reserved + * for a given CRTC and encoder configuration by calling + * intel_reserve_shared_dplls() and previously reserved PLLs can be released + * with intel_release_shared_dplls(). 
* Changes to the users are first staged in the atomic state, and then made * effective by calling intel_shared_dpll_swap_state() during the atomic * commit phase. @@ -243,17 +244,18 @@ out: } static struct intel_shared_dpll * -intel_find_shared_dpll(struct intel_crtc_state *crtc_state, +intel_find_shared_dpll(struct intel_atomic_state *state, + const struct intel_crtc *crtc, + const struct intel_dpll_hw_state *pll_state, enum intel_dpll_id range_min, enum intel_dpll_id range_max) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll, *unused_pll = NULL; struct intel_shared_dpll_state *shared_dpll; enum intel_dpll_id i; - shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); + shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); for (i = range_min; i <= range_max; i++) { pll = &dev_priv->shared_dplls[i]; @@ -265,9 +267,9 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state, continue; } - if (memcmp(&crtc_state->dpll_hw_state, + if (memcmp(pll_state, &shared_dpll[i].hw_state, - sizeof(crtc_state->dpll_hw_state)) == 0) { + sizeof(*pll_state)) == 0) { DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", crtc->base.base.id, crtc->base.name, pll->info->name, @@ -289,26 +291,51 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state, } static void -intel_reference_shared_dpll(struct intel_shared_dpll *pll, - struct intel_crtc_state *crtc_state) +intel_reference_shared_dpll(struct intel_atomic_state *state, + const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { struct intel_shared_dpll_state *shared_dpll; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); const enum intel_dpll_id id = pll->info->id; - shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); + shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); if (shared_dpll[id].crtc_mask == 0) - shared_dpll[id].hw_state = - crtc_state->dpll_hw_state; + shared_dpll[id].hw_state = *pll_state; - crtc_state->shared_dpll = pll; DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name, pipe_name(crtc->pipe)); shared_dpll[id].crtc_mask |= 1 << crtc->pipe; } +static void intel_unreference_shared_dpll(struct intel_atomic_state *state, + const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll) +{ + struct intel_shared_dpll_state *shared_dpll; + + shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); + shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe); +} + +static void intel_put_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + new_crtc_state->shared_dpll = NULL; + + if (!old_crtc_state->shared_dpll) + return; + + intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll); +} + /** * intel_shared_dpll_swap_state - make atomic DPLL configuration effective * @state: atomic state @@ -320,25 +347,20 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll, * i.e. it also puts the current state into @state, even though there is no * need for that at this moment. 
*/ -void intel_shared_dpll_swap_state(struct drm_atomic_state *state) +void intel_shared_dpll_swap_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_shared_dpll_state *shared_dpll; - struct intel_shared_dpll *pll; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_shared_dpll_state *shared_dpll = state->shared_dpll; enum intel_dpll_id i; - if (!to_intel_atomic_state(state)->dpll_set) + if (!state->dpll_set) return; - shared_dpll = to_intel_atomic_state(state)->shared_dpll; for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll_state tmp; + struct intel_shared_dpll *pll = + &dev_priv->shared_dplls[i]; - pll = &dev_priv->shared_dplls[i]; - - tmp = pll->state; - pll->state = shared_dpll[i]; - shared_dpll[i] = tmp; + swap(pll->state, shared_dpll[i]); } } @@ -421,11 +443,12 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, udelay(200); } -static struct intel_shared_dpll * -ibx_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool ibx_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id i; @@ -439,18 +462,22 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state, crtc->base.base.id, crtc->base.name, pll->info->name); } else { - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_PCH_PLL_A, DPLL_ID_PCH_PLL_B); } if (!pll) - return NULL; + return false; /* reference the pll */ - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); - return pll; + crtc_state->shared_dpll = pll; + + return true; } static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, @@ -767,8 +794,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state) +static struct intel_shared_dpll * +hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; u32 val; unsigned int p, n2, r2; @@ -781,7 +812,8 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state * crtc_state->dpll_hw_state.wrpll = val; - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); if (!pll) @@ -821,38 +853,44 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) return pll; } -static struct intel_shared_dpll * -hsw_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool hsw_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - pll = hsw_ddi_hdmi_get_dpll(crtc_state); + pll = hsw_ddi_hdmi_get_dpll(state, crtc); } else if 
(intel_crtc_has_dp_encoder(crtc_state)) { pll = hsw_ddi_dp_get_dpll(crtc_state); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { if (WARN_ON(crtc_state->port_clock / 2 != 135000)) - return NULL; + return false; crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SPLL, DPLL_ID_SPLL); } else { - return NULL; + return false; } if (!pll) - return NULL; + return false; - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); - return pll; + crtc_state->shared_dpll = pll; + + return true; } static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1385,10 +1423,12 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } -static struct intel_shared_dpll * -skl_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool skl_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; bool bret; @@ -1396,32 +1436,37 @@ skl_get_dpll(struct intel_crtc_state *crtc_state, bret = skl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); - return NULL; + return false; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); - return NULL; + return false; } } else { - return NULL; + return false; } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL0); else - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SKL_DPLL1, DPLL_ID_SKL_DPLL3); if (!pll) - return NULL; + return false; - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); - return pll; + crtc_state->shared_dpll = pll; + + return true; } static void skl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1827,22 +1872,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } -static struct intel_shared_dpll * -bxt_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool bxt_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id id; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state)) - return NULL; + return false; if (intel_crtc_has_dp_encoder(crtc_state) && !bxt_ddi_dp_set_dpll_hw_state(crtc_state)) - return NULL; + return false; /* 1:1 mapping between ports and PLLs */ id = (enum intel_dpll_id) encoder->port; @@ -1851,9 +1897,12 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state, DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", 
crtc->base.base.id, crtc->base.name, pll->info->name); - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); - return pll; + crtc_state->shared_dpll = pll; + + return true; } static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1884,9 +1933,14 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { struct intel_dpll_mgr { const struct dpll_info *dpll_info; - struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder); - + bool (*get_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); + void (*put_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*update_active_dpll)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); void (*dump_hw_state)(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state); }; @@ -1899,7 +1953,8 @@ static const struct dpll_info pch_plls[] = { static const struct intel_dpll_mgr pch_pll_mgr = { .dpll_info = pch_plls, - .get_dpll = ibx_get_dpll, + .get_dplls = ibx_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = ibx_dump_hw_state, }; @@ -1915,7 +1970,8 @@ static const struct dpll_info hsw_plls[] = { static const struct intel_dpll_mgr hsw_pll_mgr = { .dpll_info = hsw_plls, - .get_dpll = hsw_get_dpll, + .get_dplls = hsw_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = hsw_dump_hw_state, }; @@ -1929,7 +1985,8 @@ static const struct dpll_info skl_plls[] = { static const struct intel_dpll_mgr skl_pll_mgr = { .dpll_info = skl_plls, - .get_dpll = skl_get_dpll, + .get_dplls = skl_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = skl_dump_hw_state, }; @@ -1942,7 +1999,8 @@ static const struct dpll_info bxt_plls[] = { static const struct intel_dpll_mgr bxt_pll_mgr = { .dpll_info = bxt_plls, - .get_dpll = bxt_get_dpll, + .get_dplls = bxt_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = bxt_dump_hw_state, }; @@ -2332,10 +2390,12 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } -static struct intel_shared_dpll * -cnl_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool cnl_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; bool bret; @@ -2343,31 +2403,35 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state, bret = cnl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); - return NULL; + return false; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); - return NULL; + return false; } } else { DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n", crtc_state->output_types); - return NULL; + return false; } - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL2); if (!pll) { DRM_DEBUG_KMS("No PLL selected\n"); - return NULL; + return false; } - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); - return pll; + crtc_state->shared_dpll = pll; + + return true; } static void 
cnl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -2394,7 +2458,8 @@ static const struct dpll_info cnl_plls[] = { static const struct intel_dpll_mgr cnl_pll_mgr = { .dpll_info = cnl_plls, - .get_dpll = cnl_get_dpll, + .get_dplls = cnl_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = cnl_dump_hw_state, }; @@ -2506,14 +2571,16 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, } static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) + struct intel_encoder *encoder, + struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); u32 cfgcr0, cfgcr1; struct skl_wrpll_params pll_params = { 0 }; bool ret; - if (intel_port_is_tc(dev_priv, encoder->port)) + if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, + encoder->port))) ret = icl_calc_tbt_pll(crtc_state, &pll_params); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) @@ -2530,14 +2597,17 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) | DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) | DPLL_CFGCR1_KDIV(pll_params.kdiv) | - DPLL_CFGCR1_PDIV(pll_params.pdiv) | - DPLL_CFGCR1_CENTRAL_FREQ_8400; + DPLL_CFGCR1_PDIV(pll_params.pdiv); - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); + if (INTEL_GEN(dev_priv) >= 12) + cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; + else + cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; - crtc_state->dpll_hw_state.cfgcr0 = cfgcr0; - crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; + memset(pll_state, 0, sizeof(*pll_state)); + + pll_state->cfgcr0 = cfgcr0; + pll_state->cfgcr1 = cfgcr1; return true; } @@ -2627,10 +2697,10 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, * The specification for this function uses real numbers, so the math had to be * adapted to integer-only calculation, that's why it looks so different. */ -static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state) +static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, + struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state; int refclk_khz = dev_priv->cdclk.hw.ref; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; @@ -2792,63 +2862,184 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state) return true; } -static struct intel_shared_dpll * -icl_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +/** + * icl_set_active_port_dpll - select the active port DPLL for a given CRTC + * @crtc_state: state for the CRTC to select the DPLL for + * @port_dpll_id: the active @port_dpll_id to select + * + * Select the given @port_dpll_id instance from the DPLLs reserved for the + * CRTC. 
+ */ +void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, + enum icl_port_dpll_id port_dpll_id) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_digital_port *intel_dig_port; - struct intel_shared_dpll *pll; + struct icl_port_dpll *port_dpll = + &crtc_state->icl_port_dplls[port_dpll_id]; + + crtc_state->shared_dpll = port_dpll->pll; + crtc_state->dpll_hw_state = port_dpll->hw_state; +} + +static void icl_update_active_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_digital_port *primary_port; + enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; + + primary_port = encoder->type == INTEL_OUTPUT_DP_MST ? + enc_to_mst(&encoder->base)->primary : + enc_to_dig_port(&encoder->base); + + if (primary_port && + (primary_port->tc_mode == TC_PORT_DP_ALT || + primary_port->tc_mode == TC_PORT_LEGACY)) + port_dpll_id = ICL_PORT_DPLL_MG_PHY; + + icl_set_active_port_dpll(crtc_state, port_dpll_id); +} + +static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct icl_port_dpll *port_dpll = + &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum port port = encoder->port; - enum intel_dpll_id min, max; - bool ret; + bool has_dpll4 = false; - if (intel_port_is_combophy(dev_priv, port)) { - min = DPLL_ID_ICL_DPLL0; - max = DPLL_ID_ICL_DPLL1; - ret = icl_calc_dpll_state(crtc_state, encoder); - } else if (intel_port_is_tc(dev_priv, port)) { - if (encoder->type == INTEL_OUTPUT_DP_MST) { - struct intel_dp_mst_encoder *mst_encoder; + if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { + DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n"); - mst_encoder = enc_to_mst(&encoder->base); - intel_dig_port = mst_encoder->primary; - } else { - intel_dig_port = enc_to_dig_port(&encoder->base); - } + return false; + } - if (intel_dig_port->tc_type == TC_PORT_TBT) { - min = DPLL_ID_ICL_TBTPLL; - max = min; - ret = icl_calc_dpll_state(crtc_state, encoder); - } else { - enum tc_port tc_port; + if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) + has_dpll4 = true; + + port_dpll->pll = intel_find_shared_dpll(state, crtc, + &port_dpll->hw_state, + DPLL_ID_ICL_DPLL0, + has_dpll4 ? 
DPLL_ID_EHL_DPLL4 + : DPLL_ID_ICL_DPLL1); + if (!port_dpll->pll) { + DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n", + port_name(encoder->port)); + return false; + } - tc_port = intel_port_to_tc(dev_priv, port); - min = icl_tc_port_to_pll_id(tc_port); - max = min; - ret = icl_calc_mg_pll_state(crtc_state); - } - } else { - MISSING_CASE(port); - return NULL; + intel_reference_shared_dpll(state, crtc, + port_dpll->pll, &port_dpll->hw_state); + + icl_update_active_dpll(state, crtc, encoder); + + return true; +} + +static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct icl_port_dpll *port_dpll; + enum intel_dpll_id dpll_id; + + port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; + if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { + DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n"); + return false; } - if (!ret) { - DRM_DEBUG_KMS("Could not calculate PLL state.\n"); - return NULL; + port_dpll->pll = intel_find_shared_dpll(state, crtc, + &port_dpll->hw_state, + DPLL_ID_ICL_TBTPLL, + DPLL_ID_ICL_TBTPLL); + if (!port_dpll->pll) { + DRM_DEBUG_KMS("No TBT-ALT PLL found\n"); + return false; } + intel_reference_shared_dpll(state, crtc, + port_dpll->pll, &port_dpll->hw_state); - pll = intel_find_shared_dpll(crtc_state, min, max); - if (!pll) { - DRM_DEBUG_KMS("No PLL selected\n"); - return NULL; + port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; + if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) { + DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n"); + goto err_unreference_tbt_pll; } - intel_reference_shared_dpll(pll, crtc_state); + dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, + encoder->port)); + port_dpll->pll = intel_find_shared_dpll(state, crtc, + &port_dpll->hw_state, + dpll_id, + dpll_id); + if (!port_dpll->pll) { + DRM_DEBUG_KMS("No MG PHY PLL found\n"); + goto err_unreference_tbt_pll; + } + intel_reference_shared_dpll(state, crtc, + port_dpll->pll, &port_dpll->hw_state); - return pll; + icl_update_active_dpll(state, crtc, encoder); + + return true; + +err_unreference_tbt_pll: + port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; + intel_unreference_shared_dpll(state, crtc, port_dpll->pll); + + return false; +} + +static bool icl_get_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + + if (intel_phy_is_combo(dev_priv, phy)) + return icl_get_combo_phy_dpll(state, crtc, encoder); + else if (intel_phy_is_tc(dev_priv, phy)) + return icl_get_tc_phy_dplls(state, crtc, encoder); + + MISSING_CASE(phy); + + return false; +} + +static void icl_put_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + enum icl_port_dpll_id id; + + new_crtc_state->shared_dpll = NULL; + + for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) { + const struct icl_port_dpll *old_port_dpll = + &old_crtc_state->icl_port_dplls[id]; + struct icl_port_dpll *new_port_dpll = + 
&new_crtc_state->icl_port_dplls[id]; + + new_port_dpll->pll = NULL; + + if (!old_port_dpll->pll) + continue; + + intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll); + } } static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, @@ -2932,8 +3123,18 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!(val & PLL_ENABLE)) goto out; - hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); + if (INTEL_GEN(dev_priv) >= 12) { + hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id)); + } else { + if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { + hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4)); + hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4)); + } else { + hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); + } + } ret = true; out: @@ -2945,8 +3146,14 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - return icl_pll_get_hw_state(dev_priv, pll, hw_state, - CNL_DPLL_ENABLE(pll->info->id)); + i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + + if (IS_ELKHARTLAKE(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) { + enable_reg = MG_PLL_ENABLE(0); + } + + return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg); } static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv, @@ -2961,10 +3168,24 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; const enum intel_dpll_id id = pll->info->id; + i915_reg_t cfgcr0_reg, cfgcr1_reg; - I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0); - I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1); - POSTING_READ(ICL_DPLL_CFGCR1(id)); + if (INTEL_GEN(dev_priv) >= 12) { + cfgcr0_reg = TGL_DPLL_CFGCR0(id); + cfgcr1_reg = TGL_DPLL_CFGCR1(id); + } else { + if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { + cfgcr0_reg = ICL_DPLL_CFGCR0(4); + cfgcr1_reg = ICL_DPLL_CFGCR1(4); + } else { + cfgcr0_reg = ICL_DPLL_CFGCR0(id); + cfgcr1_reg = ICL_DPLL_CFGCR1(id); + } + } + + I915_WRITE(cfgcr0_reg, hw_state->cfgcr0); + I915_WRITE(cfgcr1_reg, hw_state->cfgcr1); + POSTING_READ(cfgcr1_reg); } static void icl_mg_pll_write(struct drm_i915_private *dev_priv, @@ -3057,6 +3278,19 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv, { i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + if (IS_ELKHARTLAKE(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) { + enable_reg = MG_PLL_ENABLE(0); + + /* + * We need to disable DC states when this DPLL is enabled. + * This can be done by taking a reference on DPLL4 power + * domain. 
+ */ + pll->wakeref = intel_display_power_get(dev_priv, + POWER_DOMAIN_DPLL_DC_OFF); + } + icl_pll_power_enable(dev_priv, pll, enable_reg); icl_dpll_write(dev_priv, pll); @@ -3152,7 +3386,19 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, static void combo_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id)); + i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + + if (IS_ELKHARTLAKE(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) { + enable_reg = MG_PLL_ENABLE(0); + icl_pll_disable(dev_priv, pll, enable_reg); + + intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, + pll->wakeref); + return; + } + + icl_pll_disable(dev_priv, pll, enable_reg); } static void tbt_pll_disable(struct drm_i915_private *dev_priv, @@ -3223,19 +3469,38 @@ static const struct dpll_info icl_plls[] = { static const struct intel_dpll_mgr icl_pll_mgr = { .dpll_info = icl_plls, - .get_dpll = icl_get_dpll, + .get_dplls = icl_get_dplls, + .put_dplls = icl_put_dplls, + .update_active_dpll = icl_update_active_dpll, .dump_hw_state = icl_dump_hw_state, }; static const struct dpll_info ehl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, { }, }; static const struct intel_dpll_mgr ehl_pll_mgr = { .dpll_info = ehl_plls, - .get_dpll = icl_get_dpll, + .get_dplls = icl_get_dplls, + .put_dplls = icl_put_dplls, + .dump_hw_state = icl_dump_hw_state, +}; + +static const struct dpll_info tgl_plls[] = { + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, + /* TODO: Add typeC plls */ + { }, +}; + +static const struct intel_dpll_mgr tgl_pll_mgr = { + .dpll_info = tgl_plls, + .get_dplls = icl_get_dplls, + .put_dplls = icl_put_dplls, .dump_hw_state = icl_dump_hw_state, }; @@ -3252,7 +3517,9 @@ void intel_shared_dpll_init(struct drm_device *dev) const struct dpll_info *dpll_info; int i; - if (IS_ELKHARTLAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 12) + dpll_mgr = &tgl_pll_mgr; + else if (IS_ELKHARTLAKE(dev_priv)) dpll_mgr = &ehl_pll_mgr; else if (INTEL_GEN(dev_priv) >= 11) dpll_mgr = &icl_pll_mgr; @@ -3287,50 +3554,87 @@ void intel_shared_dpll_init(struct drm_device *dev) } /** - * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination - * @crtc_state: atomic state for the crtc + * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination + * @state: atomic state + * @crtc: CRTC to reserve DPLLs for * @encoder: encoder * - * Find an appropriate DPLL for the given CRTC and encoder combination. A - * reference from the @crtc_state to the returned pll is registered in the - * atomic state. That configuration is made effective by calling - * intel_shared_dpll_swap_state(). The reference should be released by calling - * intel_release_shared_dpll(). + * This function reserves all required DPLLs for the given CRTC and encoder + * combination in the current atomic commit @state and the new @crtc atomic + * state. + * + * The new configuration in the atomic commit @state is made effective by + * calling intel_shared_dpll_swap_state(). + * + * The reserved DPLLs should be released by calling + * intel_release_shared_dplls(). * * Returns: - * A shared DPLL to be used by @crtc_state and @encoder. + * True if all required DPLLs were successfully reserved. 
*/ -struct intel_shared_dpll * -intel_get_shared_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +bool intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; if (WARN_ON(!dpll_mgr)) - return NULL; + return false; - return dpll_mgr->get_dpll(crtc_state, encoder); + return dpll_mgr->get_dplls(state, crtc, encoder); } /** - * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state - * @dpll: dpll in use by @crtc - * @crtc: crtc + * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state * @state: atomic state + * @crtc: crtc from which the DPLLs are to be released * - * This function releases the reference from @crtc to @dpll from the - * atomic @state. The new configuration is made effective by calling - * intel_shared_dpll_swap_state(). + * This function releases all DPLLs reserved by intel_reserve_shared_dplls() + * from the current atomic commit @state and the old @crtc atomic state. + * + * The new configuration in the atomic commit @state is made effective by + * calling intel_shared_dpll_swap_state(). */ -void intel_release_shared_dpll(struct intel_shared_dpll *dpll, - struct intel_crtc *crtc, - struct drm_atomic_state *state) +void intel_release_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_shared_dpll_state *shared_dpll_state; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + + /* + * FIXME: this function is called for every platform having a + * compute_clock hook, even though the platform doesn't yet support + * the shared DPLL framework and intel_reserve_shared_dplls() is not + * called on those. + */ + if (!dpll_mgr) + return; + + dpll_mgr->put_dplls(state, crtc); +} + +/** + * intel_update_active_dpll - update the active DPLL for a CRTC/encoder + * @state: atomic state + * @crtc: the CRTC for which to update the active DPLL + * @encoder: encoder determining the type of port DPLL + * + * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state, + * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The + * DPLL selected will be based on the current mode of the encoder's port. + */ +void intel_update_active_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + + if (WARN_ON(!dpll_mgr)) + return; - shared_dpll_state = intel_atomic_get_shared_dpll_state(state); - shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe); + dpll_mgr->update_active_dpll(state, crtc, encoder); } /** diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index d0570414f3d1..e7588799fce5 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -28,6 +28,7 @@ #include <linux/types.h> #include "intel_display.h" +#include "intel_wakeref.h" /*FIXME: Move this to a more appropriate place. */ #define abs_diff(a, b) ({ \ @@ -36,9 +37,9 @@ (void) (&__a == &__b); \ __a > __b ? 
(__a - __b) : (__b - __a); }) -struct drm_atomic_state; struct drm_device; struct drm_i915_private; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_encoder; @@ -110,35 +111,59 @@ enum intel_dpll_id { /** - * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0 + * @DPLL_ID_ICL_DPLL0: ICL/TGL combo PHY DPLL0 */ DPLL_ID_ICL_DPLL0 = 0, /** - * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1 + * @DPLL_ID_ICL_DPLL1: ICL/TGL combo PHY DPLL1 */ DPLL_ID_ICL_DPLL1 = 1, /** - * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL + * @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4 + */ + DPLL_ID_EHL_DPLL4 = 2, + /** + * @DPLL_ID_ICL_TBTPLL: ICL/TGL TBT PLL */ DPLL_ID_ICL_TBTPLL = 2, /** - * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C) + * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C), + * TGL TC PLL 1 port 1 (TC1) */ DPLL_ID_ICL_MGPLL1 = 3, /** * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D) + * TGL TC PLL 1 port 2 (TC2) */ DPLL_ID_ICL_MGPLL2 = 4, /** * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E) + * TGL TC PLL 1 port 3 (TC3) */ DPLL_ID_ICL_MGPLL3 = 5, /** * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F) + * TGL TC PLL 1 port 4 (TC4) */ DPLL_ID_ICL_MGPLL4 = 6, + /** + * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5) + */ + DPLL_ID_TGL_MGPLL5 = 7, + /** + * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6) + */ + DPLL_ID_TGL_MGPLL6 = 8, +}; + +#define I915_NUM_PLLS 9 + +enum icl_port_dpll_id { + ICL_PORT_DPLL_DEFAULT, + ICL_PORT_DPLL_MG_PHY, + + ICL_PORT_DPLL_COUNT, }; -#define I915_NUM_PLLS 7 struct intel_dpll_hw_state { /* i9xx, pch plls */ @@ -195,7 +220,7 @@ struct intel_dpll_hw_state { * future state which would be applied by an atomic mode set (stored in * a struct &intel_atomic_state). * - * See also intel_get_shared_dpll() and intel_release_shared_dpll(). + * See also intel_reserve_shared_dplls() and intel_release_shared_dplls(). 
*/ struct intel_shared_dpll_state { /** @@ -312,6 +337,7 @@ struct intel_shared_dpll { * @info: platform specific info */ const struct dpll_info *info; + intel_wakeref_t wakeref; }; #define SKL_DPLL0 0 @@ -331,15 +357,20 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) -struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state, - struct intel_encoder *encoder); -void intel_release_shared_dpll(struct intel_shared_dpll *dpll, - struct intel_crtc *crtc, - struct drm_atomic_state *state); +bool intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); +void intel_release_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, + enum icl_port_dpll_id port_dpll_id); +void intel_update_active_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); -void intel_shared_dpll_swap_state(struct drm_atomic_state *state); +void intel_shared_dpll_swap_state(struct intel_atomic_state *state); void intel_shared_dpll_init(struct drm_device *dev); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index 6d20434636cd..1cd24bd46518 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -49,8 +49,11 @@ struct intel_dsi { struct intel_connector *attached_connector; - /* bit mask of ports being driven */ - u16 ports; + /* bit mask of ports (vlv dsi) or phys (icl dsi) being driven */ + union { + u16 ports; /* VLV DSI */ + u16 phys; /* ICL DSI */ + }; /* if true, use HS mode, otherwise LP */ bool hs; @@ -132,7 +135,10 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) return container_of(h, struct intel_dsi_host, base); } -#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask) +#define for_each_dsi_port(__port, __ports_mask) \ + for_each_port_masked(__port, __ports_mask) +#define for_each_dsi_phy(__phy, __phys_mask) \ + for_each_phy_masked(__phy, __phys_mask) static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) { diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 4f6a9bd5af47..b42c79aea61a 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -94,11 +94,25 @@ static const struct gmbus_pin gmbus_pins_mcc[] = { [GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ }, }; +static const struct gmbus_pin gmbus_pins_tgp[] = { + [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, + [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, + [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, + [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK }, + [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL }, + [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, + [GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION }, + [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO }, +}; + /* pin is expected to be valid */ static const struct gmbus_pin 
*get_gmbus_pin(struct drm_i915_private *dev_priv, unsigned int pin) { - if (HAS_PCH_MCC(dev_priv)) + if (HAS_PCH_TGP(dev_priv)) + return &gmbus_pins_tgp[pin]; + else if (HAS_PCH_MCC(dev_priv)) return &gmbus_pins_mcc[pin]; else if (HAS_PCH_ICP(dev_priv)) return &gmbus_pins_icp[pin]; @@ -119,7 +133,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, { unsigned int size; - if (HAS_PCH_MCC(dev_priv)) + if (HAS_PCH_TGP(dev_priv)) + size = ARRAY_SIZE(gmbus_pins_tgp); + else if (HAS_PCH_MCC(dev_priv)) size = ARRAY_SIZE(gmbus_pins_mcc); else if (HAS_PCH_ICP(dev_priv)) size = ARRAY_SIZE(gmbus_pins_icp); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index e56969ebdd25..845eb8f29b58 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -523,12 +523,16 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) * authentication. */ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); - if (num_downstream == 0) + if (num_downstream == 0) { + DRM_DEBUG_KMS("Repeater with zero downstream devices\n"); return -EINVAL; + } ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); - if (!ksv_fifo) + if (!ksv_fifo) { + DRM_DEBUG_KMS("Out of mem: ksv_fifo\n"); return -ENOMEM; + } ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); if (ret) @@ -1204,8 +1208,10 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (ret < 0) return ret; - if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) + if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { + DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n"); return -EINVAL; + } hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 0ebec69bbbfc..9bf28de10401 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2930,51 +2930,34 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv, static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { - u8 ddc_pin; + enum phy phy = intel_port_to_phy(dev_priv, port); - switch (port) { - case PORT_A: - ddc_pin = GMBUS_PIN_1_BXT; - break; - case PORT_B: - ddc_pin = GMBUS_PIN_2_BXT; - break; - case PORT_C: - ddc_pin = GMBUS_PIN_9_TC1_ICP; - break; - case PORT_D: - ddc_pin = GMBUS_PIN_10_TC2_ICP; - break; - case PORT_E: - ddc_pin = GMBUS_PIN_11_TC3_ICP; - break; - case PORT_F: - ddc_pin = GMBUS_PIN_12_TC4_ICP; - break; - default: - MISSING_CASE(port); - ddc_pin = GMBUS_PIN_2_BXT; - break; - } - return ddc_pin; + if (intel_phy_is_combo(dev_priv, phy)) + return GMBUS_PIN_1_BXT + port; + else if (intel_phy_is_tc(dev_priv, phy)) + return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port); + + WARN(1, "Unknown port:%c\n", port_name(port)); + return GMBUS_PIN_2_BXT; } static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { + enum phy phy = intel_port_to_phy(dev_priv, port); u8 ddc_pin; - switch (port) { - case PORT_A: + switch (phy) { + case PHY_A: ddc_pin = GMBUS_PIN_1_BXT; break; - case PORT_B: + case PHY_B: ddc_pin = GMBUS_PIN_2_BXT; break; - case PORT_C: + case PHY_C: ddc_pin = GMBUS_PIN_9_TC1_ICP; break; default: - MISSING_CASE(port); + MISSING_CASE(phy); ddc_pin = GMBUS_PIN_1_BXT; break; } @@ -3019,7 +3002,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, if 
(HAS_PCH_MCC(dev_priv)) ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); - else if (HAS_PCH_ICP(dev_priv)) + else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv)) ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); @@ -3143,6 +3126,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, DRM_DEBUG_KMS("CEC notifier get failed\n"); } +static enum intel_hotplug_state +intel_hdmi_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, bool irq_received) +{ + enum intel_hotplug_state state; + + state = intel_encoder_hotplug(encoder, connector, irq_received); + + /* + * On many platforms the HDMI live state signal is known to be + * unreliable, so we can't use it to detect if a sink is connected or + * not. Instead we detect if it's connected based on whether we can + * read the EDID or not. That in turn has a problem during disconnect, + * since the HPD interrupt may be raised before the DDC lines get + * disconnected (due to how the required length of DDC vs. HPD + * connector pins are specified) and so we'll still be able to get a + * valid EDID. To solve this schedule another detection cycle if this + * time around we didn't detect any change in the sink's connection + * status. + */ + if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) + state = INTEL_HOTPLUG_RETRY; + + return state; +} + void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { @@ -3166,7 +3175,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); - intel_encoder->hotplug = intel_encoder_hotplug; + intel_encoder->hotplug = intel_hdmi_hotplug; intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev_priv)) { intel_encoder->disable = pch_disable_hdmi; diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index ea3de4acc850..342587d91d57 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -112,6 +112,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) +#define HPD_RETRY_DELAY 1000 /** * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin @@ -266,8 +267,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } -bool intel_encoder_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +enum intel_hotplug_state +intel_encoder_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { struct drm_device *dev = connector->base.dev; enum drm_connector_status old_status; @@ -279,7 +282,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder, drm_helper_probe_detect(&connector->base, NULL, false); if (old_status == connector->base.status) - return false; + return INTEL_HOTPLUG_UNCHANGED; DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", connector->base.base.id, @@ -287,7 +290,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder, drm_get_connector_status_name(old_status), drm_get_connector_status_name(connector->base.status)); - return true; + return INTEL_HOTPLUG_CHANGED; } static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) @@ -339,7 +342,7 @@ static 
void i915_digport_work_func(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); dev_priv->hotplug.event_bits |= old_bits; spin_unlock_irq(&dev_priv->irq_lock); - schedule_work(&dev_priv->hotplug.hotplug_work); + queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); } } @@ -349,14 +352,16 @@ static void i915_digport_work_func(struct work_struct *work) static void i915_hotplug_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, hotplug.hotplug_work); + container_of(work, struct drm_i915_private, + hotplug.hotplug_work.work); struct drm_device *dev = &dev_priv->drm; struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - bool changed = false; + u32 changed = 0, retry = 0; u32 hpd_event_bits; + u32 hpd_retry_bits; mutex_lock(&dev->mode_config.mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); @@ -365,6 +370,8 @@ static void i915_hotplug_work_func(struct work_struct *work) hpd_event_bits = dev_priv->hotplug.event_bits; dev_priv->hotplug.event_bits = 0; + hpd_retry_bits = dev_priv->hotplug.retry_bits; + dev_priv->hotplug.retry_bits = 0; /* Enable polling for connectors which had HPD IRQ storms */ intel_hpd_irq_storm_switch_to_polling(dev_priv); @@ -373,16 +380,29 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + u32 hpd_bit; + intel_connector = to_intel_connector(connector); if (!intel_connector->encoder) continue; intel_encoder = intel_connector->encoder; - if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { + hpd_bit = BIT(intel_encoder->hpd_pin); + if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) { DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", connector->name, intel_encoder->hpd_pin); - changed |= intel_encoder->hotplug(intel_encoder, - intel_connector); + switch (intel_encoder->hotplug(intel_encoder, + intel_connector, + hpd_event_bits & hpd_bit)) { + case INTEL_HOTPLUG_UNCHANGED: + break; + case INTEL_HOTPLUG_CHANGED: + changed |= hpd_bit; + break; + case INTEL_HOTPLUG_RETRY: + retry |= hpd_bit; + break; + } } } drm_connector_list_iter_end(&conn_iter); @@ -390,6 +410,17 @@ static void i915_hotplug_work_func(struct work_struct *work) if (changed) drm_kms_helper_hotplug_event(dev); + + /* Remove shared HPD pins that have changed */ + retry &= ~changed; + if (retry) { + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->hotplug.retry_bits |= retry; + spin_unlock_irq(&dev_priv->irq_lock); + + mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, + msecs_to_jiffies(HPD_RETRY_DELAY)); + } } @@ -516,7 +547,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (queue_dig) queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); if (queue_hp) - schedule_work(&dev_priv->hotplug.hotplug_work); + queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); } /** @@ -636,7 +667,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv) void intel_hpd_init_work(struct drm_i915_private *dev_priv) { - INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); + INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work, + i915_hotplug_work_func); INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); 
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, @@ -650,11 +682,12 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) dev_priv->hotplug.long_port_mask = 0; dev_priv->hotplug.short_port_mask = 0; dev_priv->hotplug.event_bits = 0; + dev_priv->hotplug.retry_bits = 0; spin_unlock_irq(&dev_priv->irq_lock); cancel_work_sync(&dev_priv->hotplug.dig_port_work); - cancel_work_sync(&dev_priv->hotplug.hotplug_work); + cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work); cancel_work_sync(&dev_priv->hotplug.poll_init_work); cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 805f897dbb7a..b0cd447b7fbc 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -15,8 +15,9 @@ struct intel_connector; struct intel_encoder; void intel_hpd_poll_init(struct drm_i915_private *dev_priv); -bool intel_encoder_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector); +enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received); void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask); void intel_hpd_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 21339b7f6a3e..07929726b780 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -175,6 +175,7 @@ struct overlay_registers { struct intel_overlay { struct drm_i915_private *i915; + struct intel_context *context; struct intel_crtc *crtc; struct i915_vma *vma; struct i915_vma *old_vma; @@ -239,9 +240,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, static struct i915_request *alloc_request(struct intel_overlay *overlay) { - struct intel_engine_cs *engine = overlay->i915->engine[RCS0]; - - return i915_request_create(engine->kernel_context); + return i915_request_create(overlay->context); } /* overlay needs to be disable in OCMD reg */ @@ -1359,11 +1358,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) if (!HAS_OVERLAY(dev_priv)) return; + if (!HAS_ENGINE(dev_priv, RCS0)) + return; + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return; overlay->i915 = dev_priv; + overlay->context = dev_priv->engine[RCS0]->kernel_context; + GEM_BUG_ON(!overlay->context); overlay->color_key = 0x0101fe; overlay->color_key_enabled = true; diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 1e2c4307d05a..9a48f7a01e7e 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc) I915_WRITE(PIPE_CRC_CTL(crtc->index), 0); POSTING_READ(PIPE_CRC_CTL(crtc->index)); - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index ceda03e5a3d4..c5e2dfd7ef80 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -274,130 +274,145 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) return false; } -#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} +#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd 
= SDVO_CMD_ ## cmd_, .name = #cmd_ } + /** Mapping of command numbers to names, for debug output */ -static const struct _sdvo_cmd_name { +static const struct { u8 cmd; const char *name; } __attribute__ ((packed)) sdvo_cmd_names[] = { - SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), + SDVO_CMD_NAME_ENTRY(RESET), + SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS), + SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV), + SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS), + SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS), + SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS), + SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS), + SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT), + SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE), + SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT), + SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT), + SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2), + 
SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING), + SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1), + SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2), + SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS), + SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS), + SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES), + SDVO_CMD_NAME_ENTRY(GET_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH), + SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT), + SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS), /* Add the op code for SDVO enhancements */ - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS), + SDVO_CMD_NAME_ENTRY(GET_HPOS), + SDVO_CMD_NAME_ENTRY(SET_HPOS), + SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS), + SDVO_CMD_NAME_ENTRY(GET_VPOS), + SDVO_CMD_NAME_ENTRY(SET_VPOS), + SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION), + 
SDVO_CMD_NAME_ENTRY(GET_SATURATION), + SDVO_CMD_NAME_ENTRY(SET_SATURATION), + SDVO_CMD_NAME_ENTRY(GET_MAX_HUE), + SDVO_CMD_NAME_ENTRY(GET_HUE), + SDVO_CMD_NAME_ENTRY(SET_HUE), + SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST), + SDVO_CMD_NAME_ENTRY(GET_CONTRAST), + SDVO_CMD_NAME_ENTRY(SET_CONTRAST), + SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS), + SDVO_CMD_NAME_ENTRY(GET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER), /* HDMI op code */ - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), + SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE), + SDVO_CMD_NAME_ENTRY(GET_ENCODE), + SDVO_CMD_NAME_ENTRY(SET_ENCODE), + SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI), + SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI), + SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP), + SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY), + SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY), + SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER), + SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT), + SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT), + SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX), + SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX), + SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO), + SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT), + SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT), + SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE), + SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE), + SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA), + SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA), }; +#undef SDVO_CMD_NAME_ENTRY + +static const char *sdvo_cmd_name(u8 cmd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { + if (cmd == sdvo_cmd_names[i].cmd) + return 
sdvo_cmd_names[i].name; + } + + return NULL; +} + #define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { + const char *cmd_name; int i, pos = 0; #define BUF_LEN 256 char buffer[BUF_LEN]; @@ -412,15 +427,12 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, for (; i < 8; i++) { BUF_PRINT(" "); } - for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { - if (cmd == sdvo_cmd_names[i].cmd) { - BUF_PRINT("(%s)", sdvo_cmd_names[i].name); - break; - } - } - if (i == ARRAY_SIZE(sdvo_cmd_names)) { + + cmd_name = sdvo_cmd_name(cmd); + if (cmd_name) + BUF_PRINT("(%s)", cmd_name); + else BUF_PRINT("(%02X)", cmd); - } BUG_ON(pos >= BUF_LEN - 1); #undef BUF_PRINT #undef BUF_LEN @@ -429,15 +441,23 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, } static const char * const cmd_status_names[] = { - "Power on", - "Success", - "Not supported", - "Invalid arg", - "Pending", - "Target not specified", - "Scaling not supported" + [SDVO_CMD_STATUS_POWER_ON] = "Power on", + [SDVO_CMD_STATUS_SUCCESS] = "Success", + [SDVO_CMD_STATUS_NOTSUPP] = "Not supported", + [SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg", + [SDVO_CMD_STATUS_PENDING] = "Pending", + [SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified", + [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported", }; +static const char *sdvo_cmd_status(u8 status) +{ + if (status < ARRAY_SIZE(cmd_status_names)) + return cmd_status_names[status]; + else + return NULL; +} + static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len, bool unlocked) @@ -516,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { + const char *cmd_status; u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ u8 status; int i, pos = 0; @@ -562,8 +583,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, #define BUF_PRINT(args...) \ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args) - if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) - BUF_PRINT("(%s)", cmd_status_names[status]); + cmd_status = sdvo_cmd_status(status); + if (cmd_status) + BUF_PRINT("(%s)", cmd_status); else BUF_PRINT("(??? %d)", status); @@ -929,6 +951,20 @@ static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, &audio_state, 1); } +static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo, + u8 *hbuf_size) +{ + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, + hbuf_size, 1)) + return false; + + /* Buffer size is 0 based, hooray! However zero means zero. */ + if (*hbuf_size) + (*hbuf_size)++; + + return true; +} + #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { @@ -972,14 +1008,10 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, set_buf_index, 2)) return false; - if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, - &hbuf_size, 1)) + if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) return false; - /* Buffer size is 0 based, hooray! 
*/ - hbuf_size++; - - DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n", if_index, length, hbuf_size); if (hbuf_size < length) @@ -1030,14 +1062,10 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, if (tx_rate == SDVO_HBUF_TX_DISABLED) return 0; - if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, - &hbuf_size, 1)) - return -ENXIO; - - /* Buffer size is 0 based, hooray! */ - hbuf_size++; + if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) + return false; - DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n", if_index, length, hbuf_size); hbuf_size = min_t(unsigned int, length, hbuf_size); @@ -1893,12 +1921,14 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) &intel_sdvo->hotplug_active, 2); } -static bool intel_sdvo_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +static enum intel_hotplug_state +intel_sdvo_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { intel_sdvo_enable_hotplug(encoder); - return intel_encoder_hotplug(encoder, connector); + return intel_encoder_hotplug(encoder, connector, irq_received); } static bool diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 004b52027ae8..53c6594c4588 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -441,9 +441,21 @@ icl_program_input_csc(struct intel_plane *plane, */ [DRM_COLOR_YCBCR_BT709] = { 0x7C98, 0x7800, 0x0, - 0x9EF8, 0x7800, 0xABF8, + 0x9EF8, 0x7800, 0xAC00, 0x0, 0x7800, 0x7ED8, }, + /* + * BT.2020 full range YCbCr -> full range RGB + * The matrix required is : + * [1.000, 0.000, 1.474, + * 1.000, -0.1645, -0.5713, + * 1.000, 1.8814, 0.0000] + */ + [DRM_COLOR_YCBCR_BT2020] = { + 0x7BC8, 0x7800, 0x0, + 0x8928, 0x7800, 0xAA88, + 0x0, 0x7800, 0x7F10, + }, }; /* Matrix for Limited Range to Full Range Conversion */ @@ -451,26 +463,38 @@ icl_program_input_csc(struct intel_plane *plane, /* * BT.601 Limted range YCbCr -> full range RGB * The matrix required is : - * [1.164384, 0.000, 1.596370, - * 1.138393, -0.382500, -0.794598, - * 1.138393, 1.971696, 0.0000] + * [1.164384, 0.000, 1.596027, + * 1.164384, -0.39175, -0.812813, + * 1.164384, 2.017232, 0.0000] */ [DRM_COLOR_YCBCR_BT601] = { 0x7CC8, 0x7950, 0x0, - 0x8CB8, 0x7918, 0x9C40, - 0x0, 0x7918, 0x7FC8, + 0x8D00, 0x7950, 0x9C88, + 0x0, 0x7950, 0x6810, }, /* * BT.709 Limited range YCbCr -> full range RGB * The matrix required is : - * [1.164, 0.000, 1.833671, - * 1.138393, -0.213249, -0.532909, - * 1.138393, 2.112402, 0.0000] + * [1.164384, 0.000, 1.792741, + * 1.164384, -0.213249, -0.532909, + * 1.164384, 2.112402, 0.0000] */ [DRM_COLOR_YCBCR_BT709] = { - 0x7EA8, 0x7950, 0x0, - 0x8888, 0x7918, 0xADA8, - 0x0, 0x7918, 0x6870, + 0x7E58, 0x7950, 0x0, + 0x8888, 0x7950, 0xADA8, + 0x0, 0x7950, 0x6870, + }, + /* + * BT.2020 Limited range YCbCr -> full range RGB + * The matrix required is : + * [1.164, 0.000, 1.678, + * 1.164, -0.1873, -0.6504, + * 1.164, 2.1417, 0.0000] + */ + [DRM_COLOR_YCBCR_BT2020] = { + 0x7D70, 0x7950, 0x0, + 0x8A68, 0x7950, 0xAC00, + 0x0, 0x7950, 0x6890, }, }; const u16 *csc; @@ -492,8 +516,11 @@ icl_program_input_csc(struct intel_plane *plane, I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), PREOFF_YUV_TO_RGB_HI); - 
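The corrected matrix comments above follow directly from the standard luma coefficients: the full-range rows use 2*(1-Kr) and 2*(1-Kb) plus the derived green terms, and limited range additionally scales luma by 255/219 and chroma by 255/224. A standalone sanity check (not driver code) that reproduces the quoted BT.709 and BT.2020 numbers in the same Y/Cb/Cr column order as the comments:

#include <stdio.h>

/* Derive YCbCr -> RGB coefficients from the colorimetry constants Kr/Kb.
 * Limited range scales luma by 255/219 and chroma by 255/224. */
static void print_csc(const char *name, double kr, double kb)
{
	double kg = 1.0 - kr - kb;
	double cr_r = 2.0 * (1.0 - kr);            /* R = Y + cr_r * Cr */
	double cb_b = 2.0 * (1.0 - kb);            /* B = Y + cb_b * Cb */
	double cb_g = 2.0 * kb * (1.0 - kb) / kg;  /* G = Y - cb_g * Cb - cr_g * Cr */
	double cr_g = 2.0 * kr * (1.0 - kr) / kg;
	double ys = 255.0 / 219.0, cs = 255.0 / 224.0;

	printf("%s full:    [1.0 0 %.6f; 1.0 %.6f %.6f; 1.0 %.6f 0]\n",
	       name, cr_r, -cb_g, -cr_g, cb_b);
	printf("%s limited: [%.6f 0 %.6f; %.6f %.6f %.6f; %.6f %.6f 0]\n",
	       name, ys, cs * cr_r, ys, -cs * cb_g, -cs * cr_g, ys, cs * cb_b);
}

int main(void)
{
	print_csc("BT.709",  0.2126, 0.0722);
	print_csc("BT.2020", 0.2627, 0.0593);
	return 0;
}

For BT.709 limited range this prints 1.164384, 1.792741, -0.213249, -0.532909 and 2.112402, matching the values the patch substitutes for the old, slightly off comment.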
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); + if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0); + else + I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), PREOFF_YUV_TO_RGB_LO); I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); @@ -683,6 +710,16 @@ skl_plane_get_hw_state(struct intel_plane *plane, return ret; } +static void i9xx_plane_linear_gamma(u16 gamma[8]) +{ + /* The points are not evenly spaced. */ + static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 }; + int i; + + for (i = 0; i < 8; i++) + gamma[i] = (in[i] << 8) / 32; +} + static void chv_update_csc(const struct intel_plane_state *plane_state) { @@ -858,6 +895,31 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } +static void vlv_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + u16 gamma[8]; + int i; + + /* Seems RGB data bypasses the gamma always */ + if (!fb->format->is_yuv) + return; + + i9xx_plane_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + /* The two end points are implicit (0.0 and 1.0) */ + for (i = 1; i < 8 - 1; i++) + I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1), + gamma[i] << 16 | + gamma[i] << 8 | + gamma[i]); +} + static void vlv_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -916,6 +978,7 @@ vlv_update_plane(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + sprsurf_offset); vlv_update_clrc(plane_state); + vlv_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1013,6 +1076,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return 0; } + sprctl |= SPRITE_INT_GAMMA_DISABLE; + if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709; @@ -1033,6 +1098,45 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } +static void ivb_sprite_linear_gamma(u16 gamma[18]) +{ + int i; + + for (i = 0; i < 17; i++) + gamma[i] = (i << 10) / 16; + + gamma[i] = 3 << 10; + i++; +} + +static void ivb_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + u16 gamma[18]; + int i; + + ivb_sprite_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + for (i = 0; i < 16; i++) + I915_WRITE_FW(SPRGAMC(pipe, i), + gamma[i] << 20 | + gamma[i] << 10 | + gamma[i]); + + I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]); + I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]); + I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]); + i++; + + I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]); + I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]); + I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]); + i++; +} + static void ivb_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -1099,6 +1203,8 @@ ivb_update_plane(struct intel_plane *plane, I915_WRITE_FW(SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + 
sprsurf_offset); + ivb_update_gamma(plane_state); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1224,6 +1330,66 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, return dvscntr; } +static void g4x_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = plane->pipe; + u16 gamma[8]; + int i; + + /* Seems RGB data bypasses the gamma always */ + if (!fb->format->is_yuv) + return; + + i9xx_plane_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + /* The two end points are implicit (0.0 and 1.0) */ + for (i = 1; i < 8 - 1; i++) + I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1), + gamma[i] << 16 | + gamma[i] << 8 | + gamma[i]); +} + +static void ilk_sprite_linear_gamma(u16 gamma[17]) +{ + int i; + + for (i = 0; i < 17; i++) + gamma[i] = (i << 10) / 16; +} + +static void ilk_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = plane->pipe; + u16 gamma[17]; + int i; + + /* Seems RGB data bypasses the gamma always */ + if (!fb->format->is_yuv) + return; + + ilk_sprite_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + for (i = 0; i < 16; i++) + I915_WRITE_FW(DVSGAMC_ILK(pipe, i), + gamma[i] << 20 | + gamma[i] << 10 | + gamma[i]); + + I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]); + I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]); + I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]); + i++; +} + static void g4x_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -1283,6 +1449,11 @@ g4x_update_plane(struct intel_plane *plane, I915_WRITE_FW(DVSSURF(pipe), intel_plane_ggtt_offset(plane_state) + dvssurf_offset); + if (IS_G4X(dev_priv)) + g4x_update_gamma(plane_state); + else + ilk_update_gamma(plane_state); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1347,7 +1518,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, const struct drm_framebuffer *fb = plane_state->base.fb; const struct drm_rect *src = &plane_state->base.src; const struct drm_rect *dst = &plane_state->base.dst; - int src_x, src_y, src_w, src_h, crtc_w, crtc_h; + int src_x, src_w, src_h, crtc_w, crtc_h; const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; unsigned int cpp = fb->format->cpp[0]; @@ -1358,7 +1529,6 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, crtc_h = drm_rect_height(dst); src_x = src->x1 >> 16; - src_y = src->y1 >> 16; src_w = drm_rect_width(src) >> 16; src_h = drm_rect_height(src) >> 16; @@ -1852,28 +2022,7 @@ static const u32 skl_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u32 icl_plane_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_Y210, - DRM_FORMAT_Y212, - DRM_FORMAT_Y216, - DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, -}; - -static const u32 icl_hdr_plane_formats[] = { +static const u32 skl_planar_formats[] = { 
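Before the skl_planar_formats[] table opened just above continues, here is a standalone sketch (not driver code) of the ramp that the new i9xx_plane_linear_gamma() helper computes. The hardware samples the gamma curve at unevenly spaced input points, so a linear (identity) curve simply maps each point x/32 of full scale to the same fraction of the 8.8 output range:

#include <stdio.h>

int main(void)
{
	/* Same input points as i9xx_plane_linear_gamma() above. */
	static const unsigned char in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
	int i;

	for (i = 0; i < 8; i++)
		printf("point %d: in %2d/32 -> out %3d/256\n",
		       i, in[i], (in[i] << 8) / 32);
	/* prints 0, 8, 16, 32, 64, 128, 192, 256 */
	return 0;
}

The vlv/g4x update helpers then program only the six interior points, since the 0.0 and 1.0 end points are implicit, and replicate each value into the three colour channels of a single register write.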
DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1882,23 +2031,14 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, - DRM_FORMAT_XRGB16161616F, - DRM_FORMAT_XBGR16161616F, - DRM_FORMAT_ARGB16161616F, - DRM_FORMAT_ABGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, - DRM_FORMAT_Y210, - DRM_FORMAT_Y212, - DRM_FORMAT_Y216, - DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, + DRM_FORMAT_NV12, }; -static const u32 skl_planar_formats[] = { +static const u32 glk_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1912,9 +2052,12 @@ static const u32 skl_planar_formats[] = { DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, }; -static const u32 glk_planar_formats[] = { +static const u32 icl_sdr_y_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1927,13 +2070,15 @@ static const u32 glk_planar_formats[] = { DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, - DRM_FORMAT_NV12, - DRM_FORMAT_P010, - DRM_FORMAT_P012, - DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, }; -static const u32 icl_planar_formats[] = { +static const u32 icl_sdr_uv_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1958,7 +2103,7 @@ static const u32 icl_planar_formats[] = { DRM_FORMAT_XVYU16161616, }; -static const u32 icl_hdr_planar_formats[] = { +static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -2201,9 +2346,6 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { - if (INTEL_GEN(dev_priv) >= 11) - return plane_id <= PLANE_SPRITE3; - /* Display WA #0870: skl, bxt */ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) return false; @@ -2217,6 +2359,48 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, return true; } +static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { + *num_formats = ARRAY_SIZE(skl_planar_formats); + return skl_planar_formats; + } else { + *num_formats = ARRAY_SIZE(skl_plane_formats); + return skl_plane_formats; + } +} + +static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { + *num_formats = ARRAY_SIZE(glk_planar_formats); + return glk_planar_formats; + } else { + *num_formats = ARRAY_SIZE(skl_plane_formats); + return skl_plane_formats; + } +} + +static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (icl_is_hdr_plane(dev_priv, plane_id)) { + *num_formats = ARRAY_SIZE(icl_hdr_plane_formats); + return icl_hdr_plane_formats; + } else if (icl_is_nv12_y_plane(plane_id)) { + *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats); + return icl_sdr_y_plane_formats; + } else { + *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats); + return icl_sdr_uv_plane_formats; + } +} + static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) 
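The skl/glk/icl_get_plane_formats() helpers added above all follow one shape: return a pointer to a static table and report its length through an out-parameter, so the caller stops repeating the ARRAY_SIZE() bookkeeping in every platform branch. A minimal standalone sketch of that shape, with illustrative names only:

#include <stdio.h>
#include <stddef.h>

static const unsigned int newer_formats[] = { 1, 2, 3, 4 };
static const unsigned int older_formats[] = { 1, 2 };

/* Pick a table for the caller and tell it how long the table is. */
static const unsigned int *get_formats(int is_newer, size_t *num_formats)
{
	if (is_newer) {
		*num_formats = sizeof(newer_formats) / sizeof(newer_formats[0]);
		return newer_formats;
	}

	*num_formats = sizeof(older_formats) / sizeof(older_formats[0]);
	return older_formats;
}

int main(void)
{
	size_t n;
	const unsigned int *f = get_formats(1, &n);

	printf("%zu formats, first %u\n", n, f[0]); /* "4 formats, first 1" */
	return 0;
}

The plane-creation code further down in this file then calls exactly one such helper per hardware generation instead of open-coding the whole ladder.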
{ @@ -2270,30 +2454,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, if (icl_is_nv12_y_plane(plane_id)) plane->update_slave = icl_update_slave; - if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { - if (icl_is_hdr_plane(dev_priv, plane_id)) { - formats = icl_hdr_planar_formats; - num_formats = ARRAY_SIZE(icl_hdr_planar_formats); - } else if (INTEL_GEN(dev_priv) >= 11) { - formats = icl_planar_formats; - num_formats = ARRAY_SIZE(icl_planar_formats); - } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) { - formats = glk_planar_formats; - num_formats = ARRAY_SIZE(glk_planar_formats); - } else { - formats = skl_planar_formats; - num_formats = ARRAY_SIZE(skl_planar_formats); - } - } else if (icl_is_hdr_plane(dev_priv, plane_id)) { - formats = icl_hdr_plane_formats; - num_formats = ARRAY_SIZE(icl_hdr_plane_formats); - } else if (INTEL_GEN(dev_priv) >= 11) { - formats = icl_plane_formats; - num_formats = ARRAY_SIZE(icl_plane_formats); - } else { - formats = skl_plane_formats; - num_formats = ARRAY_SIZE(skl_plane_formats); - } + if (INTEL_GEN(dev_priv) >= 11) + formats = icl_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); + else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + formats = glk_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); + else + formats = skl_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (plane->has_ccs) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c new file mode 100644 index 000000000000..c96a81c2416c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -0,0 +1,537 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_display.h" +#include "intel_dp_mst.h" +#include "intel_tc.h" + +static const char *tc_port_mode_name(enum tc_port_mode mode) +{ + static const char * const names[] = { + [TC_PORT_TBT_ALT] = "tbt-alt", + [TC_PORT_DP_ALT] = "dp-alt", + [TC_PORT_LEGACY] = "legacy", + }; + + if (WARN_ON(mode >= ARRAY_SIZE(names))) + mode = TC_PORT_TBT_ALT; + + return names[mode]; +} + +static bool has_modular_fia(struct drm_i915_private *i915) +{ + if (!INTEL_INFO(i915)->display.has_modular_fia) + return false; + + return intel_uncore_read(&i915->uncore, + PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK; +} + +static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915, + enum tc_port tc_port) +{ + if (!has_modular_fia(i915)) + return FIA1; + + /* + * Each Modular FIA instance houses 2 TC ports. In SOC that has more + * than two TC ports, there are multiple instances of Modular FIA. 
+ */ + return tc_port / 2; +} + +u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; + u32 lane_mask; + + lane_mask = intel_uncore_read(uncore, + PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); + + WARN_ON(lane_mask == 0xffffffff); + + return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); +} + +int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + intel_wakeref_t wakeref; + u32 lane_mask; + + if (dig_port->tc_mode != TC_PORT_DP_ALT) + return 4; + + lane_mask = 0; + with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) + lane_mask = intel_tc_port_get_lane_mask(dig_port); + + switch (lane_mask) { + default: + MISSING_CASE(lane_mask); + /* fall-through */ + case 0x1: + case 0x2: + case 0x4: + case 0x8: + return 1; + case 0x3: + case 0xc: + return 2; + case 0xf: + return 4; + } +} + +void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, + int required_lanes) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + struct intel_uncore *uncore = &i915->uncore; + u32 val; + + WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); + + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); + val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); + + switch (required_lanes) { + case 1: + val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : + DFLEXDPMLE1_DPMLETC_ML0(tc_port); + break; + case 2: + val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : + DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); + break; + case 4: + val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); + break; + default: + MISSING_CASE(required_lanes); + } + + intel_uncore_write(uncore, + PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val); +} + +static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, + u32 live_status_mask) +{ + u32 valid_hpd_mask; + + if (dig_port->tc_legacy_port) + valid_hpd_mask = BIT(TC_PORT_LEGACY); + else + valid_hpd_mask = BIT(TC_PORT_DP_ALT) | + BIT(TC_PORT_TBT_ALT); + + if (!(live_status_mask & ~valid_hpd_mask)) + return; + + /* If live status mismatches the VBT flag, trust the live status. 
*/ + DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n", + dig_port->tc_port_name, live_status_mask); + + dig_port->tc_legacy_port = !dig_port->tc_legacy_port; +} + +static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; + u32 mask = 0; + u32 val; + + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); + + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n", + dig_port->tc_port_name); + return mask; + } + + if (val & TC_LIVE_STATE_TBT(tc_port)) + mask |= BIT(TC_PORT_TBT_ALT); + if (val & TC_LIVE_STATE_TC(tc_port)) + mask |= BIT(TC_PORT_DP_ALT); + + if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) + mask |= BIT(TC_PORT_LEGACY); + + /* The sink can be connected only in a single mode. */ + if (!WARN_ON(hweight32(mask) > 1)) + tc_port_fixup_legacy_flag(dig_port, mask); + + return mask; +} + +static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; + u32 val; + + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia)); + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n", + dig_port->tc_port_name); + return false; + } + + return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port); +} + +static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, + bool enable) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; + u32 val; + + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n", + dig_port->tc_port_name, + enableddisabled(enable)); + + return false; + } + + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + if (!enable) + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + + intel_uncore_write(uncore, + PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); + + if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10)) + DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n", + dig_port->tc_port_name); + + return true; +} + +static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; + u32 val; + + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n", + dig_port->tc_port_name); + return true; + } + + return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)); +} + +/* + * This function implements the first part of the Connect Flow described by our + * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading + * lanes, EDID, etc) is done as needed in the typical places. + * + * Unlike the other ports, type-C ports are not available to use as soon as we + * get a hotplug. 
The type-C PHYs can be shared between multiple controllers: + * display, USB, etc. As a result, handshaking through FIA is required around + * connect and disconnect to cleanly transfer ownership with the controller and + * set the type-C power state. + */ +static void icl_tc_phy_connect(struct intel_digital_port *dig_port, + int required_lanes) +{ + int max_lanes; + + if (!icl_tc_phy_status_complete(dig_port)) { + DRM_DEBUG_KMS("Port %s: PHY not ready\n", + dig_port->tc_port_name); + goto out_set_tbt_alt_mode; + } + + if (!icl_tc_phy_set_safe_mode(dig_port, false) && + !WARN_ON(dig_port->tc_legacy_port)) + goto out_set_tbt_alt_mode; + + max_lanes = intel_tc_port_fia_max_lane_count(dig_port); + if (dig_port->tc_legacy_port) { + WARN_ON(max_lanes != 4); + dig_port->tc_mode = TC_PORT_LEGACY; + + return; + } + + /* + * Now we have to re-check the live state, in case the port recently + * became disconnected. Not necessary for legacy mode. + */ + if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) { + DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n", + dig_port->tc_port_name); + goto out_set_safe_mode; + } + + if (max_lanes < required_lanes) { + DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n", + dig_port->tc_port_name, + max_lanes, required_lanes); + goto out_set_safe_mode; + } + + dig_port->tc_mode = TC_PORT_DP_ALT; + + return; + +out_set_safe_mode: + icl_tc_phy_set_safe_mode(dig_port, true); +out_set_tbt_alt_mode: + dig_port->tc_mode = TC_PORT_TBT_ALT; +} + +/* + * See the comment at the connect function. This implements the Disconnect + * Flow. + */ +static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) +{ + switch (dig_port->tc_mode) { + case TC_PORT_LEGACY: + /* Nothing to do, we never disconnect from legacy mode */ + break; + case TC_PORT_DP_ALT: + icl_tc_phy_set_safe_mode(dig_port, true); + dig_port->tc_mode = TC_PORT_TBT_ALT; + break; + case TC_PORT_TBT_ALT: + /* Nothing to do, we stay in TBT-alt mode */ + break; + default: + MISSING_CASE(dig_port->tc_mode); + } +} + +static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) +{ + if (!icl_tc_phy_status_complete(dig_port)) { + DRM_DEBUG_KMS("Port %s: PHY status not complete\n", + dig_port->tc_port_name); + return dig_port->tc_mode == TC_PORT_TBT_ALT; + } + + if (icl_tc_phy_is_in_safe_mode(dig_port)) { + DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n", + dig_port->tc_port_name); + + return false; + } + + return dig_port->tc_mode == TC_PORT_DP_ALT || + dig_port->tc_mode == TC_PORT_LEGACY; +} + +static enum tc_port_mode +intel_tc_port_get_current_mode(struct intel_digital_port *dig_port) +{ + u32 live_status_mask = tc_port_live_status_mask(dig_port); + bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port); + enum tc_port_mode mode; + + if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port))) + return TC_PORT_TBT_ALT; + + mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT; + if (live_status_mask) { + enum tc_port_mode live_mode = fls(live_status_mask) - 1; + + if (!WARN_ON(live_mode == TC_PORT_TBT_ALT)) + mode = live_mode; + } + + return mode; +} + +static enum tc_port_mode +intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) +{ + u32 live_status_mask = tc_port_live_status_mask(dig_port); + + if (live_status_mask) + return fls(live_status_mask) - 1; + + return icl_tc_phy_status_complete(dig_port) && + dig_port->tc_legacy_port ? 
TC_PORT_LEGACY : + TC_PORT_TBT_ALT; +} + +static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, + int required_lanes) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port_mode old_tc_mode = dig_port->tc_mode; + + intel_display_power_flush_work(i915); + WARN_ON(intel_display_power_is_enabled(i915, + intel_aux_power_domain(dig_port))); + + icl_tc_phy_disconnect(dig_port); + icl_tc_phy_connect(dig_port, required_lanes); + + DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n", + dig_port->tc_port_name, + tc_port_mode_name(old_tc_mode), + tc_port_mode_name(dig_port->tc_mode)); +} + +static void +intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port, + int refcount) +{ + WARN_ON(dig_port->tc_link_refcount); + dig_port->tc_link_refcount = refcount; +} + +void intel_tc_port_sanitize(struct intel_digital_port *dig_port) +{ + struct intel_encoder *encoder = &dig_port->base; + int active_links = 0; + + mutex_lock(&dig_port->tc_lock); + + dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + if (dig_port->dp.is_mst) + active_links = intel_dp_mst_encoder_active_links(dig_port); + else if (encoder->base.crtc) + active_links = to_intel_crtc(encoder->base.crtc)->active; + + if (active_links) { + if (!icl_tc_phy_is_connected(dig_port)) + DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n", + dig_port->tc_port_name, active_links); + intel_tc_port_link_init_refcount(dig_port, active_links); + + goto out; + } + + if (dig_port->tc_legacy_port) + icl_tc_phy_connect(dig_port, 1); + +out: + DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n", + dig_port->tc_port_name, + tc_port_mode_name(dig_port->tc_mode)); + + mutex_unlock(&dig_port->tc_lock); +} + +static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) +{ + return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode; +} + +/* + * The type-C ports are different because even when they are connected, they may + * not be available/usable by the graphics driver: see the comment on + * icl_tc_phy_connect(). So in our driver instead of adding the additional + * concept of "usable" and make everything check for "connected and usable" we + * define a port as "connected" when it is not only connected, but also when it + * is usable by the rest of the driver. That maintains the old assumption that + * connected ports are usable, and avoids exposing to the users objects they + * can't really use. 
+ */ +bool intel_tc_port_connected(struct intel_digital_port *dig_port) +{ + bool is_connected; + + intel_tc_port_lock(dig_port); + is_connected = tc_port_live_status_mask(dig_port) & + BIT(dig_port->tc_mode); + intel_tc_port_unlock(dig_port); + + return is_connected; +} + +static void __intel_tc_port_lock(struct intel_digital_port *dig_port, + int required_lanes) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + intel_wakeref_t wakeref; + + wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE); + + mutex_lock(&dig_port->tc_lock); + + if (!dig_port->tc_link_refcount && + intel_tc_port_needs_reset(dig_port)) + intel_tc_port_reset_mode(dig_port, required_lanes); + + WARN_ON(dig_port->tc_lock_wakeref); + dig_port->tc_lock_wakeref = wakeref; +} + +void intel_tc_port_lock(struct intel_digital_port *dig_port) +{ + __intel_tc_port_lock(dig_port, 1); +} + +void intel_tc_port_unlock(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref); + + mutex_unlock(&dig_port->tc_lock); + + intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE, + wakeref); +} + +void intel_tc_port_get_link(struct intel_digital_port *dig_port, + int required_lanes) +{ + __intel_tc_port_lock(dig_port, required_lanes); + dig_port->tc_link_refcount++; + intel_tc_port_unlock(dig_port); +} + +void intel_tc_port_put_link(struct intel_digital_port *dig_port) +{ + mutex_lock(&dig_port->tc_lock); + dig_port->tc_link_refcount--; + mutex_unlock(&dig_port->tc_lock); +} + +void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(i915, port); + + if (WARN_ON(tc_port == PORT_TC_NONE)) + return; + + snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name), + "%c/TC#%d", port_name(port), tc_port + 1); + + mutex_init(&dig_port->tc_lock); + dig_port->tc_legacy_port = is_legacy; + dig_port->tc_link_refcount = 0; + dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port); +} diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h new file mode 100644 index 000000000000..22fe922ac9cf --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_TC_H__ +#define __INTEL_TC_H__ + +#include <linux/mutex.h> +#include <linux/types.h> + +#include "intel_drv.h" + +bool intel_tc_port_connected(struct intel_digital_port *dig_port); +u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); +int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); +void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, + int required_lanes); + +void intel_tc_port_sanitize(struct intel_digital_port *dig_port); +void intel_tc_port_lock(struct intel_digital_port *dig_port); +void intel_tc_port_unlock(struct intel_digital_port *dig_port); +void intel_tc_port_get_link(struct intel_digital_port *dig_port, + int required_lanes); +void intel_tc_port_put_link(struct intel_digital_port *dig_port); + +static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port) +{ + return mutex_is_locked(&dig_port->tc_lock) || + dig_port->tc_link_refcount; +} + +void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); + 
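intel_tc.h above only declares the entry points. Based on the locking rules visible in intel_tc.c (the lock owns the current mode until unlock, and a link reference pins the mode across unlocks), a hedged sketch of how callers might bracket their accesses follows; it is illustrative and not taken from this patch:

/* Illustrative callers, not part of intel_tc.h or of this series. */
static int example_max_lanes(struct intel_digital_port *dig_port)
{
	int max_lanes;

	intel_tc_port_lock(dig_port);	/* power reference + tc_lock; may
					 * reset the port mode first */
	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	intel_tc_port_unlock(dig_port);

	return max_lanes;
}

static void example_enable(struct intel_digital_port *dig_port, int lanes)
{
	/* Pin the current mode for as long as the link stays up. */
	intel_tc_port_get_link(dig_port, lanes);
	/* ... program the lane count, enable the link ... */
}

static void example_disable(struct intel_digital_port *dig_port)
{
	/* ... disable the link ... */
	intel_tc_port_put_link(dig_port);
}

Note that intel_tc_port_connected() takes and drops the lock internally, so callers use it directly rather than under intel_tc_port_lock().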
+#endif /* __INTEL_TC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 2f4894e9a03d..09cd37fb0b1c 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -310,10 +310,13 @@ enum vbt_gmbus_ddi { DDC_BUS_DDI_F, ICL_DDC_BUS_DDI_A = 0x1, ICL_DDC_BUS_DDI_B, + TGL_DDC_BUS_DDI_C, ICL_DDC_BUS_PORT_1 = 0x4, ICL_DDC_BUS_PORT_2, ICL_DDC_BUS_PORT_3, ICL_DDC_BUS_PORT_4, + TGL_DDC_BUS_PORT_5, + TGL_DDC_BUS_PORT_6, MCC_DDC_BUS_DDI_A = 0x1, MCC_DDC_BUS_DDI_B, MCC_DDC_BUS_DDI_C = 0x4, @@ -478,13 +481,13 @@ struct psr_table { /* TP wake up time in multiple of 100 */ u16 tp1_wakeup_time; u16 tp2_tp3_wakeup_time; - - /* PSR2 TP2/TP3 wakeup time for 16 panels */ - u32 psr2_tp2_tp3_wakeup_time; } __packed; struct bdb_psr { struct psr_table psr_table[16]; + + /* PSR2 TP2/TP3 wakeup time for 16 panels */ + u32 psr2_tp2_tp3_wakeup_time; } __packed; /* diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index ffec807b8960..4ab19c432ef5 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -459,17 +459,23 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, enum intel_display_power_domain intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; /* - * On ICL VDSC/joining for eDP transcoder uses a separate power well PW2 - * This requires POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain. + * On ICL VDSC/joining for eDP transcoder uses a separate power well, + * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain. * For any other transcoder, VDSC/joining uses the power well associated * with the pipe/transcoder in use. Hence another reference on the * transcoder power domain will suffice. + * + * On TGL we have the same mapping, but for transcoder A (the special + * TRANSCODER_EDP is gone). 
*/ - if (cpu_transcoder == TRANSCODER_EDP) - return POWER_DOMAIN_TRANSCODER_EDP_VDSC; + if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A) + return POWER_DOMAIN_TRANSCODER_VDSC_PW2; + else if (cpu_transcoder == TRANSCODER_EDP) + return POWER_DOMAIN_TRANSCODER_VDSC_PW2; else return POWER_DOMAIN_TRANSCODER(cpu_transcoder); } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index e272d826210a..c8002ffd29e7 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1644,7 +1644,7 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector) return intel_dsi_get_panel_orientation(connector); } -static void intel_dsi_add_properties(struct intel_connector *connector) +static void vlv_dsi_add_properties(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1983,7 +1983,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_panel_setup_backlight(connector, INVALID_PIPE); - intel_dsi_add_properties(intel_connector); + vlv_dsi_add_properties(intel_connector); return; diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile index 07e7b8b840ea..7e73aa587967 100644 --- a/drivers/gpu/drm/i915/gem/Makefile +++ b/drivers/gpu/drm/i915/gem/Makefile @@ -1 +1,5 @@ -include $(src)/Makefile.header-test # Extra header tests +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. + +# Extra header tests +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test deleted file mode 100644 index 61e06cbb4b32..000000000000 --- a/drivers/gpu/drm/i915/gem/Makefile.header-test +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header_test := $(notdir $(wildcard $(src)/*.h)) - -quiet_cmd_header_test = HDRTEST $@ - cmd_header_test = echo "\#include \"$(<F)\"" > $@ - -header_test_%.c: %.h - $(call cmd,header_test) - -extra-$(CONFIG_DRM_I915_WERROR) += \ - $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) - -clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 1fdab0767a47..2312a0c6af89 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -72,7 +72,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm, vma->ops = &proxy_vma_ops; sleeve->vma = vma; - sleeve->obj = i915_gem_object_get(obj); sleeve->pages = pages; sleeve->page_sizes = *page_sizes; @@ -85,7 +84,6 @@ err_free: static void destroy_sleeve(struct i915_sleeve *sleeve) { - i915_gem_object_put(sleeve->obj); kfree(sleeve); } @@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work) { struct clear_pages_work *w = container_of(work, typeof(*w), work); struct drm_i915_private *i915 = w->ce->gem_context->i915; - struct drm_i915_gem_object *obj = w->sleeve->obj; + struct drm_i915_gem_object *obj = w->sleeve->vma->obj; struct i915_vma *vma = w->sleeve->vma; struct i915_request *rq; int err = w->dma.error; @@ -164,11 +162,12 @@ static void clear_pages_worker(struct work_struct *work) goto out_signal; if (obj->cache_dirty) 
{ - obj->write_domain = 0; if (i915_gem_object_has_struct_page(obj)) drm_clflush_sg(w->sleeve->pages); obj->cache_dirty = false; } + obj->read_domains = I915_GEM_GPU_DOMAINS; + obj->write_domain = 0; /* XXX: we need to kill this */ mutex_lock(&i915->drm.struct_mutex); @@ -193,10 +192,12 @@ static void clear_pages_worker(struct work_struct *work) goto out_request; } - /* XXX: more feverish nightmares await */ - i915_vma_lock(vma); - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); + /* + * w->dma is already exported via (vma|obj)->resv we need only + * keep track of the GPU activity within this vma/request, and + * propagate the signal from the request to w->dma. + */ + err = i915_active_ref(&vma->active, rq->fence.context, rq); if (err) goto out_request; @@ -249,13 +250,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj, u32 value) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_gem_context *ctx = ce->gem_context; - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct clear_pages_work *work; struct i915_sleeve *sleeve; int err; - sleeve = create_sleeve(vm, obj, pages, page_sizes); + sleeve = create_sleeve(ce->vm, obj, pages, page_sizes); if (IS_ERR(sleeve)) return PTR_ERR(sleeve); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 0f2c22a3bcb6..b28c7ca681a8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -316,7 +316,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) mutex_destroy(&ctx->engines_mutex); if (ctx->timeline) - i915_timeline_put(ctx->timeline); + intel_timeline_put(ctx->timeline); kfree(ctx->name); put_pid(ctx->pid); @@ -459,8 +459,7 @@ __create_context(struct drm_i915_private *i915) i915_gem_context_set_recoverable(ctx); ctx->ring_size = 4 * PAGE_SIZE; - ctx->desc_template = - default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm); + ctx->desc_template = default_desc_template(i915, NULL); for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; @@ -476,10 +475,18 @@ static struct i915_address_space * __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) { struct i915_address_space *old = ctx->vm; + struct i915_gem_engines_iter it; + struct intel_context *ce; ctx->vm = i915_vm_get(vm); ctx->desc_template = default_desc_template(ctx->i915, vm); + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + i915_vm_put(ce->vm); + ce->vm = i915_vm_get(vm); + } + i915_gem_context_unlock_engines(ctx); + return old; } @@ -528,9 +535,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) } if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { - struct i915_timeline *timeline; + struct intel_timeline *timeline; - timeline = i915_timeline_create(dev_priv, NULL); + timeline = intel_timeline_create(&dev_priv->gt, NULL); if (IS_ERR(timeline)) { context_close(ctx); return ERR_CAST(timeline); @@ -644,20 +651,13 @@ static void init_contexts(struct drm_i915_private *i915) init_llist_head(&i915->contexts.free_list); } -static bool needs_preempt_context(struct drm_i915_private *i915) -{ - return HAS_EXECLISTS(i915); -} - int i915_gem_contexts_init(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; /* Reassure ourselves we are only called once */ GEM_BUG_ON(dev_priv->kernel_context); - GEM_BUG_ON(dev_priv->preempt_context); - 
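The __set_ppgtt() change above re-points every engine context at the new address space while keeping reference counts balanced: take a reference on the new vm for each user, drop the reference each user held on the old one, and hand the context's old top-level reference back to the caller to drop later. A standalone sketch of that shape with a toy refcounted type (not the driver's i915_vm_get()/i915_vm_put()):

#include <stdlib.h>

struct vm { int refcount; };

static struct vm *vm_get(struct vm *v)
{
	v->refcount++;
	return v;
}

static void vm_put(struct vm *v)
{
	if (--v->refcount == 0)
		free(v);
}

struct engine { struct vm *vm; };

struct ctx {
	struct vm *vm;
	struct engine engines[2];
};

/* Returns the old top-level reference; the caller drops it with vm_put()
 * once a rollback is no longer possible. */
static struct vm *ctx_set_vm(struct ctx *c, struct vm *new_vm)
{
	struct vm *old = c->vm;
	size_t i;

	c->vm = vm_get(new_vm);
	for (i = 0; i < 2; i++) {
		vm_put(c->engines[i].vm);          /* drop the per-engine ref */
		c->engines[i].vm = vm_get(new_vm); /* take one on the new vm  */
	}

	return old;
}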
intel_engine_init_ctx_wa(dev_priv->engine[RCS0]); init_contexts(dev_priv); /* lowest priority; idle task */ @@ -677,15 +677,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count)); dev_priv->kernel_context = ctx; - /* highest priority; preempting task */ - if (needs_preempt_context(dev_priv)) { - ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); - if (!IS_ERR(ctx)) - dev_priv->preempt_context = ctx; - else - DRM_ERROR("Failed to create preempt context; disabling preemption\n"); - } - DRM_DEBUG_DRIVER("%s context support initialized\n", DRIVER_CAPS(dev_priv)->has_logical_contexts ? "logical" : "fake"); @@ -696,8 +687,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915) { lockdep_assert_held(&i915->drm.struct_mutex); - if (i915->preempt_context) - destroy_kernel_context(&i915->preempt_context); destroy_kernel_context(&i915->kernel_context); /* Must free all deferred contexts (via flush_workqueue) first */ @@ -923,8 +912,12 @@ static int context_barrier_task(struct i915_gem_context *ctx, if (!cb) return -ENOMEM; - i915_active_init(i915, &cb->base, cb_retire); - i915_active_acquire(&cb->base); + i915_active_init(i915, &cb->base, NULL, cb_retire); + err = i915_active_acquire(&cb->base); + if (err) { + kfree(cb); + return err; + } for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { struct i915_request *rq; @@ -1019,7 +1012,7 @@ static void set_ppgtt_barrier(void *data) static int emit_ppgtt_update(struct i915_request *rq, void *data) { - struct i915_address_space *vm = rq->gem_context->vm; + struct i915_address_space *vm = rq->hw_context->vm; struct intel_engine_cs *engine = rq->engine; u32 base = engine->mmio_base; u32 *cs; @@ -1128,9 +1121,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, set_ppgtt_barrier, old); if (err) { - ctx->vm = old; - ctx->desc_template = default_desc_template(ctx->i915, old); - i915_vm_put(vm); + i915_vm_put(__set_ppgtt(ctx, old)); + i915_vm_put(old); } unlock: @@ -1187,26 +1179,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) if (IS_ERR(rq)) return PTR_ERR(rq); - /* Queue this switch after all other activity by this context. */ - ret = i915_active_request_set(&ce->ring->timeline->last_request, rq); - if (ret) - goto out_add; - - /* - * Guarantee context image and the timeline remains pinned until the - * modifying request is retired by setting the ce activity tracker. - * - * But we only need to take one pin on the account of it. Or in other - * words transfer the pinned ce object to tracked active request. 
- */ - GEM_BUG_ON(i915_active_is_idle(&ce->active)); - ret = i915_active_ref(&ce->active, rq->fence.context, rq); - if (ret) - goto out_add; - - ret = gen8_emit_rpcs_config(rq, ce, sseu); + /* Serialise with the remote context */ + ret = intel_context_prepare_remote_request(ce, rq); + if (ret == 0) + ret = gen8_emit_rpcs_config(rq, ce, sseu); -out_add: i915_request_add(rq); return ret; } @@ -2015,8 +1992,8 @@ static int clone_timeline(struct i915_gem_context *dst, GEM_BUG_ON(src->timeline == dst->timeline); if (dst->timeline) - i915_timeline_put(dst->timeline); - dst->timeline = i915_timeline_get(src->timeline); + intel_timeline_put(dst->timeline); + dst->timeline = intel_timeline_get(src->timeline); } return 0; @@ -2141,7 +2118,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) return -EINVAL; - ret = i915_terminally_wedged(i915); + ret = intel_gt_terminally_wedged(&i915->gt); if (ret) return ret; @@ -2287,8 +2264,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->size = 0; if (ctx->vm) args->value = ctx->vm->total; - else if (to_i915(dev)->mm.aliasing_ppgtt) - args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total; + else if (to_i915(dev)->ggtt.alias) + args->value = to_i915(dev)->ggtt.alias->vm.total; else args->value = to_i915(dev)->ggtt.vm.total; break; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 9691dd062f72..106e2ccf7a4c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -198,12 +198,6 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx) } static inline struct intel_context * -i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx) -{ - return i915_gem_context_engines(ctx)->engines[idx]; -} - -static inline struct intel_context * i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx) { struct intel_context *ce = ERR_PTR(-EINVAL); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index cc513410eeef..0ee61482ef94 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -26,7 +26,7 @@ struct pid; struct drm_i915_private; struct drm_i915_file_private; struct i915_address_space; -struct i915_timeline; +struct intel_timeline; struct intel_ring; struct i915_gem_engines { @@ -77,7 +77,7 @@ struct i915_gem_context { struct i915_gem_engines __rcu *engines; struct mutex engines_mutex; /* guards writes to engines */ - struct i915_timeline *timeline; + struct intel_timeline *timeline; /** * @vm: unique address space (GTT) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5fae0e50aad0..cbd7c6e3a1f8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -16,6 +16,7 @@ #include "gem/i915_gem_ioctls.h" #include "gt/intel_context.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "i915_gem_ioctls.h" @@ -222,7 +223,6 @@ struct i915_execbuffer { struct intel_engine_cs *engine; /** engine to queue the request to */ struct intel_context *context; /* logical state for the request */ struct i915_gem_context *gem_context; /** caller's context */ - struct i915_address_space *vm; /** GTT and vma for the request */ struct i915_request *request; /** our request 
to build */ struct i915_vma *batch; /** identity of the batch obj/vma */ @@ -696,7 +696,7 @@ static int eb_reserve(struct i915_execbuffer *eb) case 1: /* Too fragmented, unbind everything and retry */ - err = i915_gem_evict_vm(eb->vm); + err = i915_gem_evict_vm(eb->context->vm); if (err) return err; break; @@ -724,12 +724,8 @@ static int eb_select_context(struct i915_execbuffer *eb) return -ENOENT; eb->gem_context = ctx; - if (ctx->vm) { - eb->vm = ctx->vm; + if (ctx->vm) eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; - } else { - eb->vm = &eb->i915->ggtt.vm; - } eb->context_flags = 0; if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags)) @@ -831,7 +827,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) goto err_vma; } - vma = i915_vma_instance(obj, eb->vm, NULL); + vma = i915_vma_instance(obj, eb->context->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -994,7 +990,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache) __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); i915_gem_object_unpin_map(cache->rq->batch->obj); - i915_gem_chipset_flush(cache->rq->i915); + intel_gt_chipset_flush(cache->rq->engine->gt); i915_request_add(cache->rq); cache->rq = NULL; @@ -1954,7 +1950,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) eb->exec = NULL; /* Unconditionally flush any chipset caches (for streaming writes). */ - i915_gem_chipset_flush(eb->i915); + intel_gt_chipset_flush(eb->engine->gt); return 0; err_skip: @@ -2129,7 +2125,7 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce) * ABI: Before userspace accesses the GPU (e.g. execbuffer), report * EIO if the GPU is already wedged. */ - err = i915_terminally_wedged(eb->i915); + err = intel_gt_terminally_wedged(ce->engine->gt); if (err) return err; @@ -2436,7 +2432,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, * wakeref that we hold until the GPU has been idle for at least * 100ms. */ - intel_gt_pm_get(eb.i915); + intel_gt_pm_get(&eb.i915->gt); err = i915_mutex_lock_interruptible(dev); if (err) @@ -2606,7 +2602,7 @@ err_engine: err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: - intel_gt_pm_put(eb.i915); + intel_gt_pm_put(&eb.i915->gt); i915_gem_context_put(eb.gem_context); err_destroy: eb_destroy(&eb); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 391621ee3cbb..dfa525e37eb8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -7,6 +7,8 @@ #include <linux/mman.h> #include <linux/sizes.h> +#include "gt/intel_gt.h" + #include "i915_drv.h" #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" @@ -246,7 +248,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) wakeref = intel_runtime_pm_get(rpm); - srcu = i915_reset_trylock(i915); + srcu = intel_gt_reset_trylock(ggtt->vm.gt); if (srcu < 0) { ret = srcu; goto err_rpm; @@ -326,7 +328,7 @@ err_unpin: err_unlock: mutex_unlock(&dev->struct_mutex); err_reset: - i915_reset_unlock(i915, srcu); + intel_gt_reset_unlock(ggtt->vm.gt, srcu); err_rpm: intel_runtime_pm_put(rpm, wakeref); i915_gem_object_unpin_pages(obj); @@ -339,9 +341,9 @@ err: * fail). But any other -EIO isn't ours (e.g. swap in failure) * and so needs to be reported. 
*/ - if (!i915_terminally_wedged(i915)) + if (!intel_gt_is_wedged(ggtt->vm.gt)) return VM_FAULT_SIGBUS; - /* else: fall through */ + /* else, fall through */ case -EAGAIN: /* * EAGAIN means the gpu is hung and we'll wait for the error diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index be6caccce0c5..d5197a2a106f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -23,7 +23,7 @@ */ #include "display/intel_frontbuffer.h" - +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" @@ -146,6 +146,19 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) } } +static void __i915_gem_free_object_rcu(struct rcu_head *head) +{ + struct drm_i915_gem_object *obj = + container_of(head, typeof(*obj), rcu); + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + reservation_object_fini(&obj->base._resv); + i915_gem_object_free(obj); + + GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); + atomic_dec(&i915->mm.free_count); +} + static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { @@ -160,7 +173,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, mutex_lock(&i915->drm.struct_mutex); - GEM_BUG_ON(i915_gem_object_is_active(obj)); list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { GEM_BUG_ON(i915_vma_is_active(vma)); vma->flags &= ~I915_VMA_PIN_MASK; @@ -169,22 +181,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, GEM_BUG_ON(!list_empty(&obj->vma.list)); GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); - /* - * This serializes freeing with the shrinker. Since the free - * is delayed, first by RCU then by the workqueue, we want the - * shrinker to be able to free pages of unreferenced objects, - * or else we may oom whilst there are plenty of deferred - * freed objects. 
- */ - if (i915_gem_object_has_pages(obj) && - i915_gem_object_is_shrinkable(obj)) { - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - list_del_init(&obj->mm.link); - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - } - mutex_unlock(&i915->drm.struct_mutex); GEM_BUG_ON(atomic_read(&obj->bind_count)); @@ -192,25 +188,21 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); GEM_BUG_ON(!list_empty(&obj->lut_list)); - if (obj->ops->release) - obj->ops->release(obj); - atomic_set(&obj->mm.pages_pin_count, 0); __i915_gem_object_put_pages(obj, I915_MM_NORMAL); GEM_BUG_ON(i915_gem_object_has_pages(obj)); + bitmap_free(obj->bit_17); if (obj->base.import_attach) drm_prime_gem_destroy(&obj->base, NULL); - drm_gem_object_release(&obj->base); + drm_gem_free_mmap_offset(&obj->base); - bitmap_free(obj->bit_17); - i915_gem_object_free(obj); - - GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); - atomic_dec(&i915->mm.free_count); + if (obj->ops->release) + obj->ops->release(obj); - cond_resched(); + /* But keep the pointer alive for RCU-protected lookups */ + call_rcu(&obj->rcu, __i915_gem_free_object_rcu); } intel_runtime_pm_put(&i915->runtime_pm, wakeref); } @@ -261,18 +253,34 @@ static void __i915_gem_free_work(struct work_struct *work) spin_unlock(&i915->mm.free_lock); } -static void __i915_gem_free_object_rcu(struct rcu_head *head) +void i915_gem_free_object(struct drm_gem_object *gem_obj) { - struct drm_i915_gem_object *obj = - container_of(head, typeof(*obj), rcu); + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_i915_private *i915 = to_i915(obj->base.dev); /* - * We reuse obj->rcu for the freed list, so we had better not treat - * it like a rcu_head from this point forwards. And we expect all - * objects to be freed via this path. + * Before we free the object, make sure any pure RCU-only + * read-side critical sections are complete, e.g. + * i915_gem_busy_ioctl(). For the corresponding synchronized + * lookup see i915_gem_object_lookup_rcu(). */ - destroy_rcu_head(&obj->rcu); + atomic_inc(&i915->mm.free_count); + + /* + * This serializes freeing with the shrinker. Since the free + * is delayed, first by RCU then by the workqueue, we want the + * shrinker to be able to free pages of unreferenced objects, + * or else we may oom whilst there are plenty of deferred + * freed objects. + */ + if (i915_gem_object_has_pages(obj) && + i915_gem_object_is_shrinkable(obj)) { + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + list_del_init(&obj->mm.link); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + } /* * Since we require blocking on struct_mutex to unbind the freed @@ -288,20 +296,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head) queue_work(i915->wq, &i915->mm.free_work); } -void i915_gem_free_object(struct drm_gem_object *gem_obj) -{ - struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - - /* - * Before we free the object, make sure any pure RCU-only - * read-side critical sections are complete, e.g. - * i915_gem_busy_ioctl(). For the corresponding synchronized - * lookup see i915_gem_object_lookup_rcu(). 
- */ - atomic_inc(&to_i915(obj->base.dev)->mm.free_count); - call_rcu(&obj->rcu, __i915_gem_free_object_rcu); -} - static inline enum fb_op_origin fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) { @@ -319,7 +313,6 @@ void i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; assert_object_held(obj); @@ -329,7 +322,8 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, switch (obj->write_domain) { case I915_GEM_DOMAIN_GTT: - i915_gem_flush_ggtt_writes(dev_priv); + for_each_ggtt_vma(vma, obj) + intel_gt_flush_ggtt_writes(vma->vm->gt); intel_fb_obj_flush(obj, fb_write_origin(obj, I915_GEM_DOMAIN_GTT)); @@ -340,6 +334,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, i915_vma_unset_ggtt_write(vma); } + break; case I915_GEM_DOMAIN_WC: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index dfebd5706f16..67aea07ea019 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -81,7 +81,7 @@ i915_gem_object_lookup(struct drm_file *file, u32 handle) } __deprecated -extern struct drm_gem_object * +struct drm_gem_object * drm_gem_object_lookup(struct drm_file *file, u32 handle); __attribute__((nonnull)) @@ -159,12 +159,6 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) } static inline bool -i915_gem_object_is_active(const struct drm_i915_gem_object *obj) -{ - return READ_ONCE(obj->active_count); -} - -static inline bool i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) { return READ_ONCE(obj->framebuffer_references); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index cb42e3a312e2..685064af32d1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -47,15 +47,11 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, struct intel_context *ce, u32 value) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_gem_context *ctx = ce->gem_context; - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct i915_request *rq; struct i915_vma *vma; int err; - /* XXX: ce->vm please */ - vma = i915_vma_instance(obj, vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 18bf4f8d6d80..34b51fad02de 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -154,7 +154,6 @@ struct drm_i915_gem_object { /** Count of VMA actually bound by this object */ atomic_t bind_count; - unsigned int active_count; /** Count of how many global VMA are currently pinned for use by HW */ unsigned int pin_global; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index b36ad269f4ea..65eb430cedba 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -268,7 +268,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, switch (type) { default: MISSING_CASE(type); - /* fallthrough to use PAGE_KERNEL anyway */ + /* fallthrough - to use PAGE_KERNEL anyway */ case I915_MAP_WB: pgprot = PAGE_KERNEL; break; diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 2deac933cf59..102fd7a23d3d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -13,6 +13,7 @@ #include <drm/drm_legacy.h> /* for drm_pci.h! */ #include <drm/drm_pci.h> +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" @@ -60,7 +61,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) vaddr += PAGE_SIZE; } - i915_gem_chipset_flush(to_i915(obj->base.dev)); + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) { @@ -132,16 +133,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, drm_pci_free(obj->base.dev, obj->phys_handle); } -static void -i915_gem_object_release_phys(struct drm_i915_gem_object *obj) -{ - i915_gem_object_unpin_pages(obj); -} - static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { .get_pages = i915_gem_object_get_pages_phys, .put_pages = i915_gem_object_put_pages_phys, - .release = i915_gem_object_release_phys, }; int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) @@ -158,7 +152,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) if (obj->ops != &i915_gem_shmem_ops) return -EINVAL; - err = i915_gem_object_unbind(obj); + err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); if (err) return err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 05011d4a3b88..b5561cbdc5ea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -5,6 +5,7 @@ */ #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "i915_drv.h" @@ -38,7 +39,7 @@ static void i915_gem_park(struct drm_i915_private *i915) i915_gem_batch_pool_fini(&engine->batch_pool); } - i915_timelines_park(i915); + intel_timelines_park(i915); i915_vma_parked(i915); i915_globals_park(); @@ -54,7 +55,8 @@ static void idle_work_handler(struct work_struct *work) mutex_lock(&i915->drm.struct_mutex); intel_wakeref_lock(&i915->gt.wakeref); - park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work); + park = (!intel_wakeref_is_active(&i915->gt.wakeref) && + !work_pending(work)); intel_wakeref_unlock(&i915->gt.wakeref); if (park) i915_gem_park(i915); @@ -105,18 +107,18 @@ static int pm_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static bool switch_to_kernel_context_sync(struct drm_i915_private *i915) +static bool switch_to_kernel_context_sync(struct intel_gt *gt) { - bool result = !i915_terminally_wedged(i915); + bool result = !intel_gt_is_wedged(gt); do { - if (i915_gem_wait_for_idle(i915, + if (i915_gem_wait_for_idle(gt->i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_BOOST, I915_GEM_IDLE_TIMEOUT) == -ETIME) { /* XXX hide warning from gem_eio */ if (i915_modparams.reset) { - dev_err(i915->drm.dev, + dev_err(gt->i915->drm.dev, "Failed to idle engines, declaring wedged!\n"); GEM_TRACE_DUMP(); } @@ -125,18 +127,18 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915) * Forcibly cancel outstanding work and leave * the gpu quiet. 
*/ - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); result = false; } - } while (i915_retire_requests(i915) && result); + } while (i915_retire_requests(gt->i915) && result); - GEM_BUG_ON(i915->gt.awake); + GEM_BUG_ON(gt->awake); return result; } bool i915_gem_load_power_context(struct drm_i915_private *i915) { - return switch_to_kernel_context_sync(i915); + return switch_to_kernel_context_sync(&i915->gt); } void i915_gem_suspend(struct drm_i915_private *i915) @@ -157,7 +159,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. */ - switch_to_kernel_context_sync(i915); + switch_to_kernel_context_sync(&i915->gt); mutex_unlock(&i915->drm.struct_mutex); @@ -168,11 +170,11 @@ void i915_gem_suspend(struct drm_i915_private *i915) GEM_BUG_ON(i915->gt.awake); flush_work(&i915->gem.idle_work); - cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&i915->gt.hangcheck.work); i915_gem_drain_freed_objects(i915); - intel_uc_suspend(i915); + intel_uc_suspend(&i915->gt.uc); } static struct drm_i915_gem_object *first_mm_object(struct list_head *list) @@ -237,7 +239,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - intel_uc_sanitize(i915); i915_gem_sanitize(i915); } @@ -253,17 +254,18 @@ void i915_gem_resume(struct drm_i915_private *i915) i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(i915); + if (i915_gem_init_hw(i915)) + goto err_wedged; + /* * As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. So let's just reset * it and start again. */ - intel_gt_resume(i915); - - if (i915_gem_init_hw(i915)) + if (intel_gt_resume(&i915->gt)) goto err_wedged; - intel_uc_resume(i915); + intel_uc_resume(&i915->gt.uc); /* Always reload a context for powersaving. 
*/ if (!i915_gem_load_power_context(i915)) @@ -275,10 +277,10 @@ out_unlock: return; err_wedged: - if (!i915_reset_failed(i915)) { + if (!intel_gt_is_wedged(&i915->gt)) { dev_err(i915->drm.dev, "Failed to re-initialize GPU, declaring it wedged!\n"); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); } goto out_unlock; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 19d9ecdb2894..d2a1158868e7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -414,6 +414,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj, return 0; } +static void shmem_release(struct drm_i915_gem_object *obj) +{ + fput(obj->base.filp); +} + const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_IS_SHRINKABLE, @@ -424,6 +429,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .writeback = shmem_writeback, .pwrite = shmem_pwrite, + + .release = shmem_release, }; static int create_shmem(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 3a926a8755c6..3f4c6bdcc3c3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -88,10 +88,18 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; } -static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) +static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, + unsigned long shrink) { - if (i915_gem_object_unbind(obj) == 0) + unsigned long flags; + + flags = 0; + if (shrink & I915_SHRINK_ACTIVE) + flags = I915_GEM_OBJECT_UNBIND_ACTIVE; + + if (i915_gem_object_unbind(obj, flags) == 0) __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + return !i915_gem_object_has_pages(obj); } @@ -169,7 +177,6 @@ i915_gem_shrink(struct drm_i915_private *i915, */ trace_i915_gem_shrink(i915, target, shrink); - i915_retire_requests(i915); /* * Unbinding of objects will require HW access; Let us not wake the @@ -230,8 +237,7 @@ i915_gem_shrink(struct drm_i915_private *i915, continue; if (!(shrink & I915_SHRINK_ACTIVE) && - (i915_gem_object_is_active(obj) || - i915_gem_object_is_framebuffer(obj))) + i915_gem_object_is_framebuffer(obj)) continue; if (!(shrink & I915_SHRINK_BOUND) && @@ -246,7 +252,7 @@ i915_gem_shrink(struct drm_i915_private *i915, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - if (unsafe_drop_pages(obj)) { + if (unsafe_drop_pages(obj, shrink)) { /* May arrive from get_pages on another bo */ mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER); @@ -269,8 +275,6 @@ i915_gem_shrink(struct drm_i915_private *i915, if (shrink & I915_SHRINK_BOUND) intel_runtime_pm_put(&i915->runtime_pm, wakeref); - i915_retire_requests(i915); - shrinker_unlock(i915, unlock); if (nr_scanned) @@ -427,12 +431,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr if (!shrinker_lock(i915, 0, &unlock)) return NOTIFY_DONE; - /* Force everything onto the inactive lists */ - if (i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT)) - goto out; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | @@ -455,7 +453,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr } mutex_unlock(&i915->ggtt.vm.mutex); -out: shrinker_unlock(i915, unlock); 
*(unsigned long *)ptr += freed_pages; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index de1fab2058ec..639c852bad12 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -529,8 +529,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) GEM_BUG_ON(!stolen); - __i915_gem_object_unpin_pages(obj); - i915_gem_stolen_remove_node(dev_priv, stolen); kfree(stolen); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c index adb3074d9ce2..1e372420771b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -41,7 +41,7 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, long ret; /* ABI: return -EIO if already wedged */ - ret = i915_terminally_wedged(to_i915(dev)); + ret = intel_gt_terminally_wedged(&to_i915(dev)->gt); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 528b61678334..b9d2bb15e4a6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -150,7 +150,8 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, } } - ret = i915_gem_object_unbind(obj); + ret = i915_gem_object_unbind(obj, + I915_GEM_OBJECT_UNBIND_ACTIVE); if (ret == 0) ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); i915_gem_object_put(obj); @@ -662,9 +663,25 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); + /* + * We always mark objects as dirty when they are used by the GPU, + * just in case. However, if we set the vma as being read-only we know + * that the object will never have been written to. + */ + if (i915_gem_object_is_readonly(obj)) + obj->mm.dirty = false; + for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) - set_page_dirty(page); + /* + * As this may not be anonymous memory (e.g. shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. 
+ */ + set_page_dirty_lock(page); mark_page_accessed(page); put_page(page); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index b74729b6f353..6cbd4a668c9a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -10,6 +10,8 @@ #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" + #include "igt_gem_utils.h" #include "mock_context.h" @@ -926,7 +928,7 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) } *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(vma->vm->gt); i915_gem_object_unpin_map(obj); @@ -1037,8 +1039,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, u64 size, u64 offset, u32 dword, u32 val) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; struct i915_vma *vma; int err; @@ -1421,6 +1422,9 @@ static int igt_ppgtt_pin_update(void *arg) struct drm_i915_gem_object *obj; struct i915_vma *vma; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned int n; int first, last; int err; @@ -1518,11 +1522,20 @@ static int igt_ppgtt_pin_update(void *arg) * land in the now stale 2M page. */ - err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf); - if (err) - goto out_unpin; + n = 0; + for_each_engine(engine, dev_priv, id) { + if (!intel_engine_can_store_dword(engine)) + continue; - err = cpu_check(obj, 0, 0xdeadbeaf); + err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + if (err) + goto out_unpin; + } + while (n--) { + err = cpu_check(obj, n, 0xdeadbeaf); + if (err) + goto out_unpin; + } out_unpin: i915_vma_unpin(vma); @@ -1598,8 +1611,11 @@ static int igt_shrink_thp(void *arg) struct drm_i915_private *i915 = ctx->i915; struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; + enum intel_engine_id id; struct i915_vma *vma; unsigned int flags = PIN_USER; + unsigned int n; int err; /* @@ -1635,9 +1651,15 @@ static int igt_shrink_thp(void *arg) if (err) goto out_unpin; - err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf); - if (err) - goto out_unpin; + n = 0; + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + if (err) + goto out_unpin; + } i915_vma_unpin(vma); @@ -1662,7 +1684,12 @@ static int igt_shrink_thp(void *arg) if (err) goto out_close; - err = cpu_check(obj, 0, 0xdeadbeaf); + while (n--) { + err = cpu_check(obj, n, 0xdeadbeaf); + if (err) + goto out_unpin; + } + out_unpin: i915_vma_unpin(vma); @@ -1726,7 +1753,7 @@ out_unlock: return err; } -int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) +int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_shrink_thp), @@ -1741,22 +1768,22 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) intel_wakeref_t wakeref; int err; - if (!HAS_PPGTT(dev_priv)) { + if (!HAS_PPGTT(i915)) { pr_info("PPGTT not supported, skipping live-selftests\n"); return 0; } - if (i915_terminally_wedged(dev_priv)) + if (intel_gt_is_wedged(&i915->gt)) return 0; - file = mock_file(dev_priv); + file = mock_file(i915); if (IS_ERR(file)) 
return PTR_ERR(file); - mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); - ctx = live_context(dev_priv, file); + ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_unlock; @@ -1768,10 +1795,10 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev_priv->drm.struct_mutex); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); - mock_file_free(dev_priv, file); + mock_file_free(i915, file); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index f3a5eb807c1c..275c28926067 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -5,14 +5,16 @@ #include "i915_selftest.h" +#include "gt/intel_gt.h" + #include "selftests/igt_flush_test.h" #include "selftests/mock_drm.h" #include "mock_context.h" static int igt_client_fill(void *arg) { - struct intel_context *ce = arg; - struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_private *i915 = arg; + struct intel_context *ce = i915->engine[BCS0]->kernel_context; struct drm_i915_gem_object *obj; struct rnd_state prng; IGT_TIMEOUT(end); @@ -63,17 +65,6 @@ static int igt_client_fill(void *arg) if (err) goto err_unpin; - /* - * XXX: For now do the wait without the object resv lock to - * ensure we don't deadlock. - */ - err = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT); - if (err) - goto err_unpin; - i915_gem_object_lock(obj); err = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); @@ -100,11 +91,6 @@ err_unpin: err_put: i915_gem_object_put(obj); err_flush: - mutex_lock(&i915->drm.struct_mutex); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - if (err == -ENOMEM) err = 0; @@ -117,11 +103,11 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_client_fill), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; if (!HAS_ENGINE(i915, BCS0)) return 0; - return i915_subtests(tests, i915->engine[BCS0]->kernel_context); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 8f22d3f18422..a1a4b53cdc4a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -6,6 +6,8 @@ #include <linux/prime_numbers.h> +#include "gt/intel_gt.h" + #include "i915_selftest.h" #include "selftests/i915_random.h" @@ -242,12 +244,15 @@ static bool always_valid(struct drm_i915_private *i915) static bool needs_fence_registers(struct drm_i915_private *i915) { - return !i915_terminally_wedged(i915); + return !intel_gt_is_wedged(&i915->gt); } static bool needs_mi_store_dword(struct drm_i915_private *i915) { - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) + return false; + + if (!HAS_ENGINE(i915, RCS0)) return false; return intel_engine_can_store_dword(i915->engine[RCS0]); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index eaa2b16574c7..7f9f6701b32c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "gt/intel_reset.h" #include "i915_selftest.h" @@ -31,7 +32,6 @@ static int live_nop_switch(void *arg) struct intel_engine_cs *engine; struct i915_gem_context **ctx; enum intel_engine_id id; - intel_wakeref_t wakeref; struct igt_live_test t; struct drm_file *file; unsigned long n; @@ -53,7 +53,6 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -85,7 +84,7 @@ static int live_nop_switch(void *arg) } if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Failed to populated %d contexts\n", nctx); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto out_unlock; } @@ -129,7 +128,7 @@ static int live_nop_switch(void *arg) if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Switching between %ld contexts timed out\n", prime); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); break; } @@ -152,7 +151,6 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -237,8 +235,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, struct intel_engine_cs *engine, unsigned int dw) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; struct i915_request *rq; struct i915_vma *vma; struct i915_vma *batch; @@ -431,6 +428,9 @@ create_test_object(struct i915_gem_context *ctx, u64 size; int err; + /* Keep in GEM's good graces */ + i915_retire_requests(ctx->i915); + size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); size = round_down(size, DW_PER_PAGE * PAGE_SIZE); @@ -507,7 +507,6 @@ static int igt_ctx_exec(void *arg) dw = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; - intel_wakeref_t wakeref; ctx = live_context(i915, file); if (IS_ERR(ctx)) { @@ -523,8 +522,7 @@ static int igt_ctx_exec(void *arg) } } - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -565,6 +563,8 @@ out_unlock: mock_file_free(i915, file); if (err) return err; + + i915_gem_drain_freed_objects(i915); } return 0; @@ -623,7 +623,6 @@ static int igt_shared_ctx_exec(void *arg) ncontexts = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; - intel_wakeref_t wakeref; ctx = kernel_context(i915); if (IS_ERR(ctx)) { @@ -642,9 +641,7 @@ static int igt_shared_ctx_exec(void *arg) } } - err = 0; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -678,6 +675,10 @@ static int igt_shared_ctx_exec(void *arg) dw += rem; } + + mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + mutex_lock(&i915->drm.struct_mutex); } out_test: if (igt_live_test_end(&t)) @@ -746,7 +747,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - vma = i915_vma_instance(obj, ce->gem_context->vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -956,7 +957,7 @@ __sseu_finish(struct drm_i915_private *i915, int ret = 0; if (flags & TEST_RESET) { - ret = i915_reset_engine(ce->engine, "sseu"); + ret = intel_engine_reset(ce->engine, "sseu"); if (ret) goto out; } @@ -1025,35 +1026,33 @@ __igt_ctx_sseu(struct drm_i915_private *i915, unsigned int flags) { struct intel_engine_cs *engine = i915->engine[RCS0]; - struct intel_sseu default_sseu = engine->sseu; struct drm_i915_gem_object *obj; struct i915_gem_context *ctx; struct intel_context *ce; struct intel_sseu pg_sseu; - intel_wakeref_t wakeref; struct drm_file *file; int ret; - if (INTEL_GEN(i915) < 9) + if (INTEL_GEN(i915) < 9 || !engine) return 0; if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) return 0; - if (hweight32(default_sseu.slice_mask) < 2) + if (hweight32(engine->sseu.slice_mask) < 2) return 0; /* * Gen11 VME friendly power-gated configuration with half enabled * sub-slices. */ - pg_sseu = default_sseu; + pg_sseu = engine->sseu; pg_sseu.slice_mask = 1; pg_sseu.subslice_mask = - ~(~0 << (hweight32(default_sseu.subslice_mask) / 2)); + ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2)); pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", - name, flags, hweight32(default_sseu.slice_mask), + name, flags, hweight32(engine->sseu.slice_mask), hweight32(pg_sseu.slice_mask)); file = mock_file(i915); @@ -1061,7 +1060,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, return PTR_ERR(file); if (flags & TEST_RESET) - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); mutex_lock(&i915->drm.struct_mutex); @@ -1078,12 +1077,10 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_unlock; } - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - ce = i915_gem_context_get_engine(ctx, RCS0); if (IS_ERR(ce)) { ret = PTR_ERR(ce); - goto out_rpm; + goto out_put; } ret = intel_context_pin(ce); @@ -1091,7 +1088,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_context; /* First set the default mask. */ - ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); if (ret) goto out_fail; @@ -1101,7 +1098,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_fail; /* Back to defaults. 
*/ - ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); if (ret) goto out_fail; @@ -1117,15 +1114,14 @@ out_fail: intel_context_unpin(ce); out_context: intel_context_put(ce); -out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); +out_put: i915_gem_object_put(obj); out_unlock: mutex_unlock(&i915->drm.struct_mutex); if (flags & TEST_RESET) - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); mock_file_free(i915, file); @@ -1194,7 +1190,7 @@ static int igt_ctx_readonly(void *arg) goto out_unlock; } - vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm; + vm = ctx->vm ?: &i915->ggtt.alias->vm; if (!vm || !vm->has_read_only) { err = 0; goto out_unlock; @@ -1207,8 +1203,6 @@ static int igt_ctx_readonly(void *arg) unsigned int id; for_each_engine(engine, i915, id) { - intel_wakeref_t wakeref; - if (!intel_engine_can_store_dword(engine)) continue; @@ -1223,9 +1217,7 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - err = 0; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -1488,7 +1480,6 @@ static int igt_vm_isolation(void *arg) struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; struct intel_engine_cs *engine; - intel_wakeref_t wakeref; struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); @@ -1535,8 +1526,6 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->vm->total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - count = 0; for_each_engine(engine, i915, id) { IGT_TIMEOUT(end_time); @@ -1551,7 +1540,7 @@ static int igt_vm_isolation(void *arg) div64_u64_rem(i915_prandom_u64_state(&prng), vm_total, &offset); - offset &= -sizeof(u32); + offset = round_down(offset, alignof_dword); offset += I915_GTT_PAGE_SIZE; err = write_to_scratch(ctx_a, engine, @@ -1560,7 +1549,7 @@ static int igt_vm_isolation(void *arg) err = read_from_scratch(ctx_b, engine, offset, &value); if (err) - goto out_rpm; + goto out_unlock; if (value) { pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", @@ -1569,7 +1558,7 @@ static int igt_vm_isolation(void *arg) lower_32_bits(offset), this); err = -EINVAL; - goto out_rpm; + goto out_unlock; } this++; @@ -1579,8 +1568,6 @@ static int igt_vm_isolation(void *arg) pr_info("Checked %lu scratch offsets across %d engines\n", count, RUNTIME_INFO(i915)->num_engines); -out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); out_unlock: if (igt_live_test_end(&t)) err = -EIO; @@ -1736,7 +1723,7 @@ int i915_gem_context_mock_selftests(void) return err; } -int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) +int i915_gem_context_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_nop_switch), @@ -1747,8 +1734,8 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) SUBTEST(igt_vm_isolation), }; - if (i915_terminally_wedged(dev_priv)) + if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_subtests(tests, dev_priv); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 5c81f4b4813a..01857c12f12f 100644 --- 
a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -6,6 +6,7 @@ #include <linux/prime_numbers.h> +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "huge_gem_object.h" #include "i915_selftest.h" @@ -143,7 +144,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, if (offset >= obj->base.size) continue; - i915_gem_flush_ggtt_writes(to_i915(obj->base.dev)); + intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = kmap(p) + offset_in_page(offset); @@ -327,7 +328,8 @@ out: static int make_obj_busy(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_request *rq; + struct intel_engine_cs *engine; + enum intel_engine_id id; struct i915_vma *vma; int err; @@ -339,17 +341,21 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) if (err) return err; - rq = i915_request_create(i915->engine[RCS0]->kernel_context); - if (IS_ERR(rq)) { - i915_vma_unpin(vma); - return PTR_ERR(rq); - } + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + i915_vma_unpin(vma); + return PTR_ERR(rq); + } - i915_vma_lock(vma); - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); + i915_vma_lock(vma); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); - i915_request_add(rq); + i915_request_add(rq); + } i915_vma_unpin(vma); i915_gem_object_put(obj); /* leave it only alive via its active ref */ @@ -378,7 +384,7 @@ static void disable_retire_worker(struct drm_i915_private *i915) { i915_gem_shrinker_unregister(i915); - intel_gt_pm_get(i915); + intel_gt_pm_get(&i915->gt); cancel_delayed_work_sync(&i915->gem.retire_work); flush_work(&i915->gem.idle_work); @@ -386,7 +392,7 @@ static void disable_retire_worker(struct drm_i915_private *i915) static void restore_retire_worker(struct drm_i915_private *i915) { - intel_gt_pm_put(i915); + intel_gt_pm_put(&i915->gt); mutex_lock(&i915->drm.struct_mutex); igt_flush_test(i915, I915_WAIT_LOCKED); @@ -395,6 +401,18 @@ static void restore_retire_worker(struct drm_i915_private *i915) i915_gem_shrinker_register(i915); } +static void mmap_offset_lock(struct drm_i915_private *i915) + __acquires(&i915->drm.vma_offset_manager->vm_lock) +{ + write_lock(&i915->drm.vma_offset_manager->vm_lock); +} + +static void mmap_offset_unlock(struct drm_i915_private *i915) + __releases(&i915->drm.vma_offset_manager->vm_lock) +{ + write_unlock(&i915->drm.vma_offset_manager->vm_lock); +} + static int igt_mmap_offset_exhaustion(void *arg) { struct drm_i915_private *i915 = arg; @@ -413,7 +431,9 @@ static int igt_mmap_offset_exhaustion(void *arg) drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { resv.start = hole_start; resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */ + mmap_offset_lock(i915); err = drm_mm_reserve_node(mm, &resv); + mmap_offset_unlock(i915); if (err) { pr_err("Failed to trim VMA manager, err=%d\n", err); goto out_park; @@ -458,7 +478,7 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) break; obj = i915_gem_object_create_internal(i915, PAGE_SIZE); @@ -474,19 +494,12 @@ static int igt_mmap_offset_exhaustion(void *arg) pr_err("[loop %d] Failed to busy the 
object\n", loop); goto err_obj; } - - /* NB we rely on the _active_ reference to access obj now */ - GEM_BUG_ON(!i915_gem_object_is_active(obj)); - err = create_mmap_offset(obj); - if (err) { - pr_err("[loop %d] create_mmap_offset failed with err=%d\n", - loop, err); - goto out; - } } out: + mmap_offset_lock(i915); drm_mm_remove_node(&resv); + mmap_offset_unlock(i915); out_park: restore_retire_worker(i915); return err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c index e23d8c9e9298..19843acc84d3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include "gt/intel_gt.h" + #include "i915_selftest.h" #include "selftests/igt_flush_test.h" @@ -11,8 +13,8 @@ static int igt_fill_blt(void *arg) { - struct intel_context *ce = arg; - struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_private *i915 = arg; + struct intel_context *ce = i915->engine[BCS0]->kernel_context; struct drm_i915_gem_object *obj; struct rnd_state prng; IGT_TIMEOUT(end); @@ -83,11 +85,6 @@ err_unpin: err_put: i915_gem_object_put(obj); err_flush: - mutex_lock(&i915->drm.struct_mutex); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - if (err == -ENOMEM) err = 0; @@ -100,11 +97,11 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_fill_blt), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; if (!HAS_ENGINE(i915, BCS0)) return 0; - return i915_subtests(tests, i915->engine[BCS0]->kernel_context); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile index 1c75b5c9790c..7e73aa587967 100644 --- a/drivers/gpu/drm/i915/gt/Makefile +++ b/drivers/gpu/drm/i915/gt/Makefile @@ -1,2 +1,5 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. 
+ # Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/Makefile.header-test b/drivers/gpu/drm/i915/gt/Makefile.header-test deleted file mode 100644 index 61e06cbb4b32..000000000000 --- a/drivers/gpu/drm/i915/gt/Makefile.header-test +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header_test := $(notdir $(wildcard $(src)/*.h)) - -quiet_cmd_header_test = HDRTEST $@ - cmd_header_test = echo "\#include \"$(<F)\"" > $@ - -header_test_%.c: %.h - $(call cmd,header_test) - -extra-$(CONFIG_DRM_I915_WERROR) += \ - $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) - -clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c index 11c8e7b3dd7c..11c8e7b3dd7c 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c +++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c index 655180646152..655180646152 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c index 95288a34c15d..95288a34c15d 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c +++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c index 7d3ac02f0177..7d3ac02f0177 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c +++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 2c454f227c2e..f30441a140f8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -59,6 +59,10 @@ int __intel_context_do_pin(struct intel_context *ce) if (err) goto err; + GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n", + ce->engine->name, ce->ring->timeline->fence_context, + ce->ring->head, ce->ring->tail); + i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */ smp_mb__before_atomic(); /* flush pin before it is visible */ @@ -85,6 +89,9 @@ void intel_context_unpin(struct intel_context *ce) mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); if (likely(atomic_dec_and_test(&ce->pin_count))) { + GEM_TRACE("%s context:%llx retire\n", + ce->engine->name, ce->ring->timeline->fence_context); + ce->ops->unpin(ce); i915_gem_context_put(ce->gem_context); @@ -95,11 +102,15 @@ void intel_context_unpin(struct intel_context *ce) intel_context_put(ce); } -static int __context_pin_state(struct i915_vma *vma, unsigned long flags) +static int __context_pin_state(struct i915_vma *vma) { + u64 flags; int err; - err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL); + flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS; + flags |= PIN_HIGH | PIN_GLOBAL; + + err = i915_vma_pin(vma, 0, 0, flags); if (err) return err; @@ -119,16 +130,57 @@ static void __context_unpin_state(struct i915_vma *vma) __i915_vma_unpin(vma); } -static void intel_context_retire(struct i915_active *active) +static void __intel_context_retire(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); + GEM_TRACE("%s 
context:%llx retire\n", + ce->engine->name, ce->ring->timeline->fence_context); + if (ce->state) __context_unpin_state(ce->state); + intel_ring_unpin(ce->ring); intel_context_put(ce); } +static int __intel_context_active(struct i915_active *active) +{ + struct intel_context *ce = container_of(active, typeof(*ce), active); + int err; + + intel_context_get(ce); + + err = intel_ring_pin(ce->ring); + if (err) + goto err_put; + + if (!ce->state) + return 0; + + err = __context_pin_state(ce->state); + if (err) + goto err_ring; + + /* Preallocate tracking nodes */ + if (!i915_gem_context_is_kernel(ce->gem_context)) { + err = i915_active_acquire_preallocate_barrier(&ce->active, + ce->engine); + if (err) + goto err_state; + } + + return 0; + +err_state: + __context_unpin_state(ce->state); +err_ring: + intel_ring_unpin(ce->ring); +err_put: + intel_context_put(ce); + return err; +} + void intel_context_init(struct intel_context *ce, struct i915_gem_context *ctx, @@ -139,6 +191,8 @@ intel_context_init(struct intel_context *ce, kref_init(&ce->ref); ce->gem_context = ctx; + ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm); + ce->engine = engine; ce->ops = engine->cops; ce->sseu = engine->sseu; @@ -148,46 +202,16 @@ intel_context_init(struct intel_context *ce, mutex_init(&ce->pin_mutex); - i915_active_init(ctx->i915, &ce->active, intel_context_retire); + i915_active_init(ctx->i915, &ce->active, + __intel_context_active, __intel_context_retire); } -int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) +void intel_context_fini(struct intel_context *ce) { - int err; - - if (!i915_active_acquire(&ce->active)) - return 0; + i915_vm_put(ce->vm); - intel_context_get(ce); - - if (!ce->state) - return 0; - - err = __context_pin_state(ce->state, flags); - if (err) { - i915_active_cancel(&ce->active); - intel_context_put(ce); - return err; - } - - /* Preallocate tracking nodes */ - if (!i915_gem_context_is_kernel(ce->gem_context)) { - err = i915_active_acquire_preallocate_barrier(&ce->active, - ce->engine); - if (err) { - i915_active_release(&ce->active); - return err; - } - } - - return 0; -} - -void intel_context_active_release(struct intel_context *ce) -{ - /* Nodes preallocated in intel_context_active() */ - i915_active_acquire_barrier(&ce->active); - i915_active_release(&ce->active); + mutex_destroy(&ce->pin_mutex); + i915_active_fini(&ce->active); } static void i915_global_context_shrink(void) @@ -225,6 +249,44 @@ void intel_context_exit_engine(struct intel_context *ce) intel_engine_pm_put(ce->engine); } +int intel_context_prepare_remote_request(struct intel_context *ce, + struct i915_request *rq) +{ + struct intel_timeline *tl = ce->ring->timeline; + int err; + + /* Only suitable for use in remotely modifying this context */ + GEM_BUG_ON(rq->hw_context == ce); + + if (rq->timeline != tl) { /* beware timeline sharing */ + err = mutex_lock_interruptible_nested(&tl->mutex, + SINGLE_DEPTH_NESTING); + if (err) + return err; + + /* Queue this switch after current activity by this context. */ + err = i915_active_request_set(&tl->last_request, rq); + if (err) + goto unlock; + } + lockdep_assert_held(&tl->mutex); + + /* + * Guarantee context image and the timeline remains pinned until the + * modifying request is retired by setting the ce activity tracker. + * + * But we only need to take one pin on the account of it. Or in other + * words transfer the pinned ce object to tracked active request. 
+ */ + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + err = i915_active_ref(&ce->active, rq->fence.context, rq); + +unlock: + if (rq->timeline != tl) + mutex_unlock(&tl->mutex); + return err; +} + struct i915_request *intel_context_create_request(struct intel_context *ce) { struct i915_request *rq; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index a47275bc4f01..23c7e4c0ce7c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -9,12 +9,14 @@ #include <linux/lockdep.h> +#include "i915_active.h" #include "intel_context_types.h" #include "intel_engine_types.h" void intel_context_init(struct intel_context *ce, struct i915_gem_context *ctx, struct intel_engine_cs *engine); +void intel_context_fini(struct intel_context *ce); struct intel_context * intel_context_create(struct i915_gem_context *ctx, @@ -102,8 +104,17 @@ static inline void intel_context_exit(struct intel_context *ce) ce->ops->exit(ce); } -int intel_context_active_acquire(struct intel_context *ce, unsigned long flags); -void intel_context_active_release(struct intel_context *ce); +static inline int intel_context_active_acquire(struct intel_context *ce) +{ + return i915_active_acquire(&ce->active); +} + +static inline void intel_context_active_release(struct intel_context *ce) +{ + /* Nodes preallocated in intel_context_active() */ + i915_active_acquire_barrier(&ce->active); + i915_active_release(&ce->active); +} static inline struct intel_context *intel_context_get(struct intel_context *ce) { @@ -129,6 +140,9 @@ static inline void intel_context_timeline_unlock(struct intel_context *ce) mutex_unlock(&ce->ring->timeline->mutex); } +int intel_context_prepare_remote_request(struct intel_context *ce, + struct i915_request *rq); + struct i915_request *intel_context_create_request(struct intel_context *ce); #endif /* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 08049ee91cee..68a7e979b1a9 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -13,6 +13,7 @@ #include <linux/types.h> #include "i915_active_types.h" +#include "i915_utils.h" #include "intel_engine_types.h" #include "intel_sseu.h" @@ -35,9 +36,15 @@ struct intel_context_ops { struct intel_context { struct kref ref; - struct i915_gem_context *gem_context; struct intel_engine_cs *engine; struct intel_engine_cs *inflight; +#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2) +#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2) +#define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight) +#define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight) + + struct i915_address_space *vm; + struct i915_gem_context *gem_context; struct list_head signal_link; struct list_head signals; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 2f1c6871ee95..db5c73ce86ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -14,7 +14,7 @@ #include "i915_reg.h" #include "i915_request.h" #include "i915_selftest.h" -#include "i915_timeline.h" +#include "gt/intel_timeline.h" #include "intel_engine_types.h" #include "intel_gpu_commands.h" #include "intel_workarounds.h" @@ -51,7 +51,7 @@ struct drm_printer; #define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__) #define ENGINE_READ(...) 
__ENGINE_READ_OP(read, __VA_ARGS__) #define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__) -#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__) +#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__) #define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__) #define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \ @@ -125,71 +125,26 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) void intel_engines_set_scheduler_caps(struct drm_i915_private *i915); -static inline void -execlists_set_active(struct intel_engine_execlists *execlists, - unsigned int bit) -{ - __set_bit(bit, (unsigned long *)&execlists->active); -} - -static inline bool -execlists_set_active_once(struct intel_engine_execlists *execlists, - unsigned int bit) -{ - return !__test_and_set_bit(bit, (unsigned long *)&execlists->active); -} - -static inline void -execlists_clear_active(struct intel_engine_execlists *execlists, - unsigned int bit) -{ - __clear_bit(bit, (unsigned long *)&execlists->active); -} - -static inline void -execlists_clear_all_active(struct intel_engine_execlists *execlists) +static inline unsigned int +execlists_num_ports(const struct intel_engine_execlists * const execlists) { - execlists->active = 0; + return execlists->port_mask + 1; } -static inline bool -execlists_is_active(const struct intel_engine_execlists *execlists, - unsigned int bit) +static inline struct i915_request * +execlists_active(const struct intel_engine_execlists *execlists) { - return test_bit(bit, (unsigned long *)&execlists->active); + GEM_BUG_ON(execlists->active - execlists->inflight > + execlists_num_ports(execlists)); + return READ_ONCE(*execlists->active); } -void execlists_user_begin(struct intel_engine_execlists *execlists, - const struct execlist_port *port); -void execlists_user_end(struct intel_engine_execlists *execlists); - void execlists_cancel_port_requests(struct intel_engine_execlists * const execlists); struct i915_request * execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists); -static inline unsigned int -execlists_num_ports(const struct intel_engine_execlists * const execlists) -{ - return execlists->port_mask + 1; -} - -static inline struct execlist_port * -execlists_port_complete(struct intel_engine_execlists * const execlists, - struct execlist_port * const port) -{ - const unsigned int m = execlists->port_mask; - - GEM_BUG_ON(port_index(port, execlists) != 0); - GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); - - memmove(port, port + 1, m * sizeof(struct execlist_port)); - memset(port + m, 0, sizeof(struct execlist_port)); - - return port; -} - static inline u32 intel_read_status_page(const struct intel_engine_cs *engine, int reg) { @@ -245,7 +200,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, - struct i915_timeline *timeline, + struct intel_timeline *timeline, int size); int intel_ring_pin(struct intel_ring *ring); void intel_ring_reset(struct intel_ring *ring, u32 tail); @@ -456,8 +411,8 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) return cs; } -static inline void intel_engine_reset(struct intel_engine_cs *engine, - bool stalled) +static inline void __intel_engine_reset(struct intel_engine_cs *engine, + bool stalled) { if (engine->reset.reset) engine->reset.reset(engine, stalled); @@ -465,9 +420,9 @@ static inline void 
intel_engine_reset(struct intel_engine_cs *engine, } bool intel_engine_is_idle(struct intel_engine_cs *engine); -bool intel_engines_are_idle(struct drm_i915_private *dev_priv); +bool intel_engines_are_idle(struct intel_gt *gt); -void intel_engines_reset_default_submission(struct drm_i915_private *i915); +void intel_engines_reset_default_submission(struct intel_gt *gt); unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915); bool intel_engine_can_store_dword(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7fd33e81c2d9..65cbf1d9118d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -28,6 +28,8 @@ #include "i915_drv.h" +#include "gt/intel_gt.h" + #include "intel_engine.h" #include "intel_engine_pm.h" #include "intel_context.h" @@ -314,6 +316,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->id = id; engine->mask = BIT(id); engine->i915 = dev_priv; + engine->gt = &dev_priv->gt; engine->uncore = &dev_priv->uncore; __sprint_engine_name(engine->name, info); engine->hw_id = engine->guc_id = info->hw_id; @@ -423,7 +426,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) WARN_ON(engine_mask & GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { @@ -445,15 +448,9 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) if (WARN_ON(mask != engine_mask)) device_info->engine_mask = mask; - /* We always presume we have at least RCS available for later probing */ - if (WARN_ON(!HAS_ENGINE(i915, RCS0))) { - err = -ENODEV; - goto cleanup; - } - RUNTIME_INFO(i915)->num_engines = hweight32(mask); - i915_check_and_clear_faults(i915); + intel_gt_check_and_clear_faults(&i915->gt); intel_setup_engine_capabilities(i915); @@ -508,6 +505,10 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine) GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); + memset(execlists->pending, 0, sizeof(execlists->pending)); + execlists->active = + memset(execlists->inflight, 0, sizeof(execlists->inflight)); + execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; } @@ -577,7 +578,7 @@ static int init_status_page(struct intel_engine_cs *engine) i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; @@ -629,6 +630,10 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) engine->sseu = intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu); + intel_engine_init_workarounds(engine); + intel_engine_init_whitelist(engine); + intel_engine_init_ctx_wa(engine); + return 0; } @@ -681,9 +686,10 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915) u8 engine; u8 sched; } map[] = { -#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) } - MAP(PREEMPTION, PREEMPTION), - MAP(SEMAPHORES, SEMAPHORES), +#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) } + MAP(HAS_PREEMPTION, PREEMPTION), + MAP(HAS_SEMAPHORES, SEMAPHORES), + MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS), #undef MAP }; struct intel_engine_cs *engine; @@ -717,7 +723,7 @@ void 
intel_engines_set_scheduler_caps(struct drm_i915_private *i915) struct measure_breadcrumb { struct i915_request rq; - struct i915_timeline timeline; + struct intel_timeline timeline; struct intel_ring ring; u32 cs[1024]; }; @@ -727,15 +733,15 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) struct measure_breadcrumb *frame; int dw = -ENOMEM; - GEM_BUG_ON(!engine->i915->gt.scratch); + GEM_BUG_ON(!engine->gt->scratch); frame = kzalloc(sizeof(*frame), GFP_KERNEL); if (!frame) return -ENOMEM; - if (i915_timeline_init(engine->i915, - &frame->timeline, - engine->status_page.vma)) + if (intel_timeline_init(&frame->timeline, + engine->gt, + engine->status_page.vma)) goto out_frame; INIT_LIST_HEAD(&frame->ring.request_list); @@ -750,17 +756,17 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) frame->rq.ring = &frame->ring; frame->rq.timeline = &frame->timeline; - dw = i915_timeline_pin(&frame->timeline); + dw = intel_timeline_pin(&frame->timeline); if (dw < 0) goto out_timeline; dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */ - i915_timeline_unpin(&frame->timeline); + intel_timeline_unpin(&frame->timeline); out_timeline: - i915_timeline_fini(&frame->timeline); + intel_timeline_fini(&frame->timeline); out_frame: kfree(frame); return dw; @@ -823,6 +829,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; int ret; + engine->set_default_submission(engine); + /* We may need to do things with the shrinker which * require us to immediately switch back to the default * context. This can cause a problem as pinning the @@ -835,28 +843,15 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) return ret; - /* - * Similarly the preempt context must always be available so that - * we can interrupt the engine at any time. However, as preemption - * is optional, we allow it to fail. 
- */ - if (i915->preempt_context) - pin_context(i915->preempt_context, engine, - &engine->preempt_context); - ret = measure_breadcrumb_dw(engine); if (ret < 0) goto err_unpin; engine->emit_fini_breadcrumb_dw = ret; - engine->set_default_submission(engine); - return 0; err_unpin: - if (engine->preempt_context) - intel_context_unpin(engine->preempt_context); intel_context_unpin(engine->kernel_context); return ret; } @@ -881,8 +876,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - if (engine->preempt_context) - intel_context_unpin(engine->preempt_context); intel_context_unpin(engine->kernel_context); GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); @@ -966,52 +959,23 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } } -u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) -{ - const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - u32 mcr_s_ss_select; - u32 slice = fls(sseu->slice_mask); - u32 subslice = fls(sseu->subslice_mask[slice]); - - if (IS_GEN(dev_priv, 10)) - mcr_s_ss_select = GEN8_MCR_SLICE(slice) | - GEN8_MCR_SUBSLICE(subslice); - else if (INTEL_GEN(dev_priv) >= 11) - mcr_s_ss_select = GEN11_MCR_SLICE(slice) | - GEN11_MCR_SUBSLICE(subslice); - else - mcr_s_ss_select = 0; - - return mcr_s_ss_select; -} - static u32 read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, i915_reg_t reg) { struct drm_i915_private *i915 = engine->i915; struct intel_uncore *uncore = engine->uncore; - u32 mcr_slice_subslice_mask; - u32 mcr_slice_subslice_select; - u32 default_mcr_s_ss_select; - u32 mcr; - u32 ret; + u32 mcr_mask, mcr_ss, mcr, old_mcr, val; enum forcewake_domains fw_domains; if (INTEL_GEN(i915) >= 11) { - mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | - GEN11_MCR_SUBSLICE_MASK; - mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) | - GEN11_MCR_SUBSLICE(subslice); + mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; + mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice); } else { - mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK | - GEN8_MCR_SUBSLICE_MASK; - mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) | - GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); } - default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915); - fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); fw_domains |= intel_uncore_forcewake_for_reg(uncore, @@ -1021,26 +985,23 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, spin_lock_irq(&uncore->lock); intel_uncore_forcewake_get__locked(uncore, fw_domains); - mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR); - - WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) != - default_mcr_s_ss_select); + old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR); - mcr &= ~mcr_slice_subslice_mask; - mcr |= mcr_slice_subslice_select; + mcr &= ~mcr_mask; + mcr |= mcr_ss; intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); - ret = intel_uncore_read_fw(uncore, reg); + val = intel_uncore_read_fw(uncore, reg); - mcr &= ~mcr_slice_subslice_mask; - mcr |= default_mcr_s_ss_select; + mcr &= ~mcr_mask; + mcr |= old_mcr & mcr_mask; intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); intel_uncore_forcewake_put__locked(uncore, fw_domains); spin_unlock_irq(&uncore->lock); - return ret; + return val; } /* NB: please notice the memset */ @@ 
-1145,17 +1106,17 @@ static bool ring_is_idle(struct intel_engine_cs *engine) bool intel_engine_is_idle(struct intel_engine_cs *engine) { /* More white lies, if wedged, hw state is inconsistent */ - if (i915_reset_failed(engine->i915)) + if (intel_gt_is_wedged(engine->gt)) return true; - if (!intel_wakeref_active(&engine->wakeref)) + if (!intel_engine_pm_is_awake(engine)) return true; /* Waiting to drain ELSP? */ - if (READ_ONCE(engine->execlists.active)) { + if (execlists_active(&engine->execlists)) { struct tasklet_struct *t = &engine->execlists.tasklet; - synchronize_hardirq(engine->i915->drm.irq); + synchronize_hardirq(engine->i915->drm.pdev->irq); local_bh_disable(); if (tasklet_trylock(t)) { @@ -1169,7 +1130,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) /* Otherwise flush the tasklet if it was on another cpu */ tasklet_unlock_wait(t); - if (READ_ONCE(engine->execlists.active)) + if (execlists_active(&engine->execlists)) return false; } @@ -1181,7 +1142,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return ring_is_idle(engine); } -bool intel_engines_are_idle(struct drm_i915_private *i915) +bool intel_engines_are_idle(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1190,14 +1151,14 @@ bool intel_engines_are_idle(struct drm_i915_private *i915) * If the driver is wedged, HW state may be very inconsistent and * report that it is still busy, even though we have stopped using it. */ - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return true; /* Already parked (and passed an idleness test); must still be idle */ - if (!READ_ONCE(i915->gt.awake)) + if (!READ_ONCE(gt->awake)) return true; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { if (!intel_engine_is_idle(engine)) return false; } @@ -1205,12 +1166,12 @@ bool intel_engines_are_idle(struct drm_i915_private *i915) return true; } -void intel_engines_reset_default_submission(struct drm_i915_private *i915) +void intel_engines_reset_default_submission(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) engine->set_default_submission(engine); } @@ -1367,6 +1328,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, } if (HAS_EXECLISTS(dev_priv)) { + struct i915_request * const *port, *rq; const u32 *hws = &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; const u8 num_entries = execlists->csb_size; @@ -1399,27 +1361,33 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, } spin_lock_irqsave(&engine->active.lock, flags); - for (idx = 0; idx < execlists_num_ports(execlists); idx++) { - struct i915_request *rq; - unsigned int count; + for (port = execlists->active; (rq = *port); port++) { + char hdr[80]; + int len; + + len = snprintf(hdr, sizeof(hdr), + "\t\tActive[%d: ", + (int)(port - execlists->active)); + if (!i915_request_signaled(rq)) + len += snprintf(hdr + len, sizeof(hdr) - len, + "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ", + i915_ggtt_offset(rq->ring->vma), + rq->timeline->hwsp_offset, + hwsp_seqno(rq)); + snprintf(hdr + len, sizeof(hdr) - len, "rq: "); + print_request(m, rq, hdr); + } + for (port = execlists->pending; (rq = *port); port++) { char hdr[80]; - rq = port_unpack(&execlists->port[idx], &count); - if (!rq) { - drm_printf(m, "\t\tELSP[%d] idle\n", idx); - } else if (!i915_request_signaled(rq)) { - snprintf(hdr, sizeof(hdr), - "\t\tELSP[%d] count=%d, 
ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", - idx, count, - i915_ggtt_offset(rq->ring->vma), - rq->timeline->hwsp_offset, - hwsp_seqno(rq)); - print_request(m, rq, hdr); - } else { - print_request(m, rq, "\t\tELSP[%d] rq: "); - } + snprintf(hdr, sizeof(hdr), + "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", + (int)(port - execlists->pending), + i915_ggtt_offset(rq->ring->vma), + rq->timeline->hwsp_offset, + hwsp_seqno(rq)); + print_request(m, rq, hdr); } - drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active); spin_unlock_irqrestore(&engine->active.lock, flags); } else if (INTEL_GEN(dev_priv) > 6) { drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", @@ -1471,6 +1439,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct i915_gpu_error * const error = &engine->i915->gpu_error; struct i915_request *rq; intel_wakeref_t wakeref; + unsigned long flags; if (header) { va_list ap; @@ -1480,7 +1449,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, va_end(ap); } - if (i915_reset_failed(engine->i915)) + if (intel_gt_is_wedged(engine->gt)) drm_printf(m, "*** WEDGED ***\n"); drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count)); @@ -1490,10 +1459,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, i915_reset_engine_count(error, engine), i915_reset_count(error)); - rcu_read_lock(); - drm_printf(m, "\tRequests:\n"); + spin_lock_irqsave(&engine->active.lock, flags); rq = intel_engine_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); @@ -1513,8 +1481,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, print_request_ring(m, rq); } - - rcu_read_unlock(); + spin_unlock_irqrestore(&engine->active.lock, flags); wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); if (wakeref) { @@ -1583,15 +1550,19 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) } if (engine->stats.enabled++ == 0) { - const struct execlist_port *port = execlists->port; - unsigned int num_ports = execlists_num_ports(execlists); + struct i915_request * const *port; + struct i915_request *rq; engine->stats.enabled_at = ktime_get(); /* XXX submission method oblivious? */ - while (num_ports-- && port_isset(port)) { + for (port = execlists->active; (rq = *port); port++) engine->stats.active++; - port++; + + for (port = execlists->pending; (rq = *port); port++) { + /* Exclude any contexts already counted in active */ + if (intel_context_inflight_count(rq->hw_context) == 1) + engine->stats.active++; } if (engine->stats.active) @@ -1672,7 +1643,6 @@ struct i915_request * intel_engine_find_active_request(struct intel_engine_cs *engine) { struct i915_request *request, *active = NULL; - unsigned long flags; /* * We are called by the error capture, reset and to dump engine @@ -1685,7 +1655,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) * At all other times, we must assume the GPU is still running, but * we only care about the snapshot of this moment. 
*/ - spin_lock_irqsave(&engine->active.lock, flags); + lockdep_assert_held(&engine->active.lock); list_for_each_entry(request, &engine->active.requests, sched.link) { if (i915_request_completed(request)) continue; @@ -1700,7 +1670,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) active = request; break; } - spin_unlock_irqrestore(&engine->active.lock, flags); return active; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 2ce00d3dc42a..e74fbf04a68d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -8,6 +8,7 @@ #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_gt.h" #include "intel_gt_pm.h" static int __engine_unpark(struct intel_wakeref *wf) @@ -18,7 +19,7 @@ static int __engine_unpark(struct intel_wakeref *wf) GEM_TRACE("%s\n", engine->name); - intel_gt_pm_get(engine->i915); + intel_gt_pm_get(engine->gt); /* Pin the default state for fast resets from atomic context. */ map = NULL; @@ -66,7 +67,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) return true; /* GPU is pointing to the void, as good as in the kernel context. */ - if (i915_reset_failed(engine->i915)) + if (intel_gt_is_wedged(engine->gt)) return true; /* @@ -129,7 +130,7 @@ static int __engine_park(struct intel_wakeref *wf) engine->execlists.no_priolist = false; - intel_gt_pm_put(engine->i915); + intel_gt_pm_put(engine->gt); return 0; } @@ -142,27 +143,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) { intel_wakeref_init(&engine->wakeref); } - -int intel_engines_resume(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = 0; - - intel_gt_pm_get(i915); - for_each_engine(engine, i915, id) { - intel_engine_pm_get(engine); - engine->serial++; /* kernel context lost */ - err = engine->resume(engine); - intel_engine_pm_put(engine); - if (err) { - dev_err(i915->drm.dev, - "Failed to restart %s (%d)\n", - engine->name, err); - break; - } - } - intel_gt_pm_put(i915); - - return err; -} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index b326cd993d60..015ac72d7ad0 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -7,16 +7,28 @@ #ifndef INTEL_ENGINE_PM_H #define INTEL_ENGINE_PM_H +#include "intel_engine_types.h" +#include "intel_wakeref.h" + struct drm_i915_private; -struct intel_engine_cs; void intel_engine_pm_get(struct intel_engine_cs *engine); void intel_engine_pm_put(struct intel_engine_cs *engine); +static inline bool +intel_engine_pm_is_awake(const struct intel_engine_cs *engine) +{ + return intel_wakeref_is_active(&engine->wakeref); +} + +static inline bool +intel_engine_pm_get_if_awake(struct intel_engine_cs *engine) +{ + return intel_wakeref_get_if_active(&engine->wakeref); +} + void intel_engine_park(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine); -int intel_engines_resume(struct drm_i915_private *i915); - #endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 868b220214f8..da61dd329210 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -12,6 +12,7 @@ #include <linux/kref.h> #include <linux/list.h> #include <linux/llist.h> +#include <linux/timer.h> #include <linux/types.h> #include 
"i915_gem.h" @@ -19,7 +20,7 @@ #include "i915_pmu.h" #include "i915_priolist_types.h" #include "i915_selftest.h" -#include "i915_timeline_types.h" +#include "gt/intel_timeline_types.h" #include "intel_sseu.h" #include "intel_wakeref.h" #include "intel_workarounds_types.h" @@ -35,6 +36,7 @@ struct drm_i915_reg_table; struct i915_gem_context; struct i915_request; struct i915_sched_attr; +struct intel_gt; struct intel_uncore; typedef u8 intel_engine_mask_t; @@ -66,10 +68,22 @@ struct intel_ring { struct i915_vma *vma; void *vaddr; - struct i915_timeline *timeline; + struct intel_timeline *timeline; struct list_head request_list; struct list_head active_link; + /* + * As we have two types of rings, one global to the engine used + * by ringbuffer submission and those that are exclusive to a + * context used by execlists, we have to play safe and allow + * atomic updates to the pin_count. However, the actual pinning + * of the context is either done during initialisation for + * ringbuffer submission or serialised as part of the context + * pinning for execlists, and so we do not need a mutex ourselves + * to serialise intel_ring_pin/intel_ring_unpin. + */ + atomic_t pin_count; + u32 head; u32 tail; u32 emit; @@ -138,6 +152,11 @@ struct intel_engine_execlists { struct tasklet_struct tasklet; /** + * @timer: kick the current context if its timeslice expires + */ + struct timer_list timer; + + /** * @default_priolist: priority list for I915_PRIORITY_NORMAL */ struct i915_priolist default_priolist; @@ -160,51 +179,28 @@ struct intel_engine_execlists { */ u32 __iomem *ctrl_reg; +#define EXECLIST_MAX_PORTS 2 /** - * @port: execlist port states + * @active: the currently known context executing on HW + */ + struct i915_request * const *active; + /** + * @inflight: the set of contexts submitted and acknowleged by HW * - * For each hardware ELSP (ExecList Submission Port) we keep - * track of the last request and the number of times we submitted - * that port to hw. We then count the number of times the hw reports - * a context completion or preemption. As only one context can - * be active on hw, we limit resubmission of context to port[0]. This - * is called Lite Restore, of the context. + * The set of inflight contexts is managed by reading CS events + * from the HW. On a context-switch event (not preemption), we + * know the HW has transitioned from port0 to port1, and we + * advance our inflight/active tracking accordingly. */ - struct execlist_port { - /** - * @request_count: combined request and submission count - */ - struct i915_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, execlists) ((p) - (execlists)->port) - - /** - * @context_id: context ID for port - */ - GEM_DEBUG_DECL(u32 context_id); - -#define EXECLIST_MAX_PORTS 2 - } port[EXECLIST_MAX_PORTS]; - + struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */]; /** - * @active: is the HW active? We consider the HW as active after - * submitting any context for execution and until we have seen the - * last context completion event. 
After that, we do not expect any - * more events until we submit, and so can park the HW. + * @pending: the next set of contexts submitted to ELSP * - * As we have a small number of different sources from which we feed - * the HW, we track the state of each inside a single bitfield. + * We store the array of contexts that we submit to HW (via ELSP) and + * promote them to the inflight array once HW has signaled the + * preemption or idle-to-active event. */ - unsigned int active; -#define EXECLISTS_ACTIVE_USER 0 -#define EXECLISTS_ACTIVE_PREEMPT 1 -#define EXECLISTS_ACTIVE_HWACK 2 + struct i915_request *pending[EXECLIST_MAX_PORTS + 1]; /** * @port_mask: number of execlist ports - 1 @@ -246,11 +242,6 @@ struct intel_engine_execlists { u32 *csb_status; /** - * @preempt_complete_status: expected CSB upon completing preemption - */ - u32 preempt_complete_status; - - /** * @csb_size: context status buffer FIFO size */ u8 csb_size; @@ -267,6 +258,7 @@ struct intel_engine_execlists { struct intel_engine_cs { struct drm_i915_private *i915; + struct intel_gt *gt; struct intel_uncore *uncore; char name[INTEL_ENGINE_CS_MAX_NAME]; @@ -296,7 +288,6 @@ struct intel_engine_cs { struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ - struct intel_context *preempt_context; /* pinned; optional */ intel_engine_mask_t saturated; /* submitting semaphores too late? */ @@ -392,7 +383,6 @@ struct intel_engine_cs { const struct intel_context_ops *cops; int (*request_alloc)(struct i915_request *rq); - int (*init_context)(struct i915_request *rq); int (*emit_flush)(struct i915_request *request, u32 mode); #define EMIT_INVALIDATE BIT(0) diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index eec31e36aca7..69f34737325f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -8,6 +8,13 @@ #define _INTEL_GPU_COMMANDS_H_ /* + * Target address alignments required for GPU access e.g. + * MI_STORE_DWORD_IMM. 
+ */ +#define alignof_dword 4 +#define alignof_qword 8 + +/* * Instruction field definitions used by the command parser */ #define INSTR_CLIENT_SHIFT 29 diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c new file mode 100644 index 000000000000..f7e69db4019d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" + +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_uncore.h" + +void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) +{ + gt->i915 = i915; + gt->uncore = &i915->uncore; + + INIT_LIST_HEAD(>->active_rings); + INIT_LIST_HEAD(>->closed_vma); + + spin_lock_init(>->closed_lock); + + intel_gt_init_hangcheck(gt); + intel_gt_init_reset(gt); + intel_gt_pm_init_early(gt); +} + +void intel_gt_init_hw(struct drm_i915_private *i915) +{ + i915->gt.ggtt = &i915->ggtt; +} + +static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) +{ + intel_uncore_rmw(uncore, reg, 0, set); +} + +static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) +{ + intel_uncore_rmw(uncore, reg, clr, 0); +} + +static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) +{ + intel_uncore_rmw(uncore, reg, 0, 0); +} + +static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) +{ + GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); + GEN6_RING_FAULT_REG_POSTING_READ(engine); +} + +void +intel_gt_clear_error_registers(struct intel_gt *gt, + intel_engine_mask_t engine_mask) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 eir; + + if (!IS_GEN(i915, 2)) + clear_register(uncore, PGTBL_ER); + + if (INTEL_GEN(i915) < 4) + clear_register(uncore, IPEIR(RENDER_RING_BASE)); + else + clear_register(uncore, IPEIR_I965); + + clear_register(uncore, EIR); + eir = intel_uncore_read(uncore, EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. + */ + DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); + rmw_set(uncore, EMR, eir); + intel_uncore_write(uncore, GEN2_IIR, + I915_MASTER_ERROR_INTERRUPT); + } + + if (INTEL_GEN(i915) >= 8) { + rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); + intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); + } else if (INTEL_GEN(i915) >= 6) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine_masked(engine, i915, engine_mask, id) + gen8_clear_engine_error_register(engine); + } +} + +static void gen6_check_faults(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 fault; + + for_each_engine(engine, gt->i915, id) { + fault = GEN6_RING_FAULT_REG_READ(engine); + if (fault & RING_FAULT_VALID) { + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08lx\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault & PAGE_MASK, + fault & RING_FAULT_GTTSEL_MASK ? 
+ "GGTT" : "PPGTT", + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } + } +} + +static void gen8_check_faults(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG); + + if (fault & RING_FAULT_VALID) { + u32 fault_data0, fault_data1; + u64 fault_addr; + + fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0); + fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1); + fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | + ((u64)fault_data0 << 12); + + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08x_%08x\n" + "\tAddress space: %s\n" + "\tEngine ID: %d\n" + "\tSource ID: %d\n" + "\tType: %d\n", + upper_32_bits(fault_addr), + lower_32_bits(fault_addr), + fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", + GEN8_RING_FAULT_ENGINE_ID(fault), + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } +} + +void intel_gt_check_and_clear_faults(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + /* From GEN8 onwards we only have one 'All Engine Fault Register' */ + if (INTEL_GEN(i915) >= 8) + gen8_check_faults(gt); + else if (INTEL_GEN(i915) >= 6) + gen6_check_faults(gt); + else + return; + + intel_gt_clear_error_registers(gt, ALL_ENGINES); +} + +void intel_gt_flush_ggtt_writes(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + intel_wakeref_t wakeref; + + /* + * No actual flushing is required for the GTT write domain for reads + * from the GTT domain. Writes to it "immediately" go to main memory + * as far as we know, so there's no chipset flush. It also doesn't + * land in the GPU render cache. + * + * However, we do have to enforce the order so that all writes through + * the GTT land before any writes to the device, such as updates to + * the GATT itself. + * + * We also have to wait a bit for the writes to land from the GTT. + * An uncached read (i.e. mmio) seems to be ideal for the round-trip + * timing. This issue has only been observed when switching quickly + * between GTT writes and CPU reads from inside the kernel on recent hw, + * and it appears to only affect discrete GTT blocks (i.e. on LLC + * system agents we cannot reproduce this behaviour, until Cannonlake + * that was!). 
+ */ + + wmb(); + + if (INTEL_INFO(i915)->has_coherent_ggtt) + return; + + intel_gt_chipset_flush(gt); + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + struct intel_uncore *uncore = gt->uncore; + + spin_lock_irq(&uncore->lock); + intel_uncore_posting_read_fw(uncore, + RING_HEAD(RENDER_RING_BASE)); + spin_unlock_irq(&uncore->lock); + } +} + +void intel_gt_chipset_flush(struct intel_gt *gt) +{ + wmb(); + if (INTEL_GEN(gt->i915) < 6) + intel_gtt_chipset_flush(); +} + +int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) +{ + struct drm_i915_private *i915 = gt->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; + + obj = i915_gem_object_create_stolen(i915, size); + if (!obj) + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + return PTR_ERR(obj); + } + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unref; + } + + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_unref; + + gt->scratch = vma; + return 0; + +err_unref: + i915_gem_object_put(obj); + return ret; +} + +void intel_gt_fini_scratch(struct intel_gt *gt) +{ + i915_vma_unpin_and_release(>->scratch, 0); +} + +void intel_gt_cleanup_early(struct intel_gt *gt) +{ + intel_gt_fini_reset(gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h new file mode 100644 index 000000000000..640bb0531f5b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_GT__ +#define __INTEL_GT__ + +#include "intel_engine_types.h" +#include "intel_gt_types.h" +#include "intel_reset.h" + +struct drm_i915_private; + +static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) +{ + return container_of(uc, struct intel_gt, uc); +} + +static inline struct intel_gt *guc_to_gt(struct intel_guc *guc) +{ + return container_of(guc, struct intel_gt, uc.guc); +} + +static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) +{ + return container_of(huc, struct intel_gt, uc.huc); +} + +void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); +void intel_gt_init_hw(struct drm_i915_private *i915); + +void intel_gt_cleanup_early(struct intel_gt *gt); + +void intel_gt_check_and_clear_faults(struct intel_gt *gt); +void intel_gt_clear_error_registers(struct intel_gt *gt, + intel_engine_mask_t engine_mask); + +void intel_gt_flush_ggtt_writes(struct intel_gt *gt); +void intel_gt_chipset_flush(struct intel_gt *gt); + +void intel_gt_init_hangcheck(struct intel_gt *gt); + +int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size); +void intel_gt_fini_scratch(struct intel_gt *gt); + +static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, + enum intel_gt_scratch_field field) +{ + return i915_ggtt_offset(gt->scratch) + field; +} + +static inline bool intel_gt_is_wedged(struct intel_gt *gt) +{ + return __intel_reset_failed(>->reset); +} + +void intel_gt_queue_hangcheck(struct intel_gt *gt); + +#endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 7b5967751762..65c0d0c9d543 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -5,6 +5,9 @@ */ #include "i915_drv.h" +#include "i915_params.h" +#include "intel_engine_pm.h" +#include "intel_gt.h" #include "intel_gt_pm.h" #include 
"intel_pm.h" #include "intel_wakeref.h" @@ -16,8 +19,8 @@ static void pm_notify(struct drm_i915_private *i915, int state) static int intel_gt_unpark(struct intel_wakeref *wf) { - struct drm_i915_private *i915 = - container_of(wf, typeof(*i915), gt.wakeref); + struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); + struct drm_i915_private *i915 = gt->i915; GEM_TRACE("\n"); @@ -32,8 +35,8 @@ static int intel_gt_unpark(struct intel_wakeref *wf) * Work around it by grabbing a GT IRQ power domain whilst there is any * GT activity, preventing any DC state transitions. */ - i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); - GEM_BUG_ON(!i915->gt.awake); + gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + GEM_BUG_ON(!gt->awake); intel_enable_gt_powersave(i915); @@ -43,16 +46,18 @@ static int intel_gt_unpark(struct intel_wakeref *wf) i915_pmu_gt_unparked(i915); - i915_queue_hangcheck(i915); + intel_gt_queue_hangcheck(gt); pm_notify(i915, INTEL_GT_UNPARK); return 0; } -void intel_gt_pm_get(struct drm_i915_private *i915) +void intel_gt_pm_get(struct intel_gt *gt) { - intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark); + struct intel_runtime_pm *rpm = >->i915->runtime_pm; + + intel_wakeref_get(rpm, >->wakeref, intel_gt_unpark); } static int intel_gt_park(struct intel_wakeref *wf) @@ -75,28 +80,30 @@ static int intel_gt_park(struct intel_wakeref *wf) return 0; } -void intel_gt_pm_put(struct drm_i915_private *i915) +void intel_gt_pm_put(struct intel_gt *gt) { - intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park); + struct intel_runtime_pm *rpm = >->i915->runtime_pm; + + intel_wakeref_put(rpm, >->wakeref, intel_gt_park); } -void intel_gt_pm_init(struct drm_i915_private *i915) +void intel_gt_pm_init_early(struct intel_gt *gt) { - intel_wakeref_init(&i915->gt.wakeref); - BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications); + intel_wakeref_init(>->wakeref); + BLOCKING_INIT_NOTIFIER_HEAD(>->pm_notifications); } -static bool reset_engines(struct drm_i915_private *i915) +static bool reset_engines(struct intel_gt *gt) { - if (INTEL_INFO(i915)->gpu_reset_clobbers_display) + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) return false; - return intel_gpu_reset(i915, ALL_ENGINES) == 0; + return __intel_gt_reset(gt, ALL_ENGINES) == 0; } /** * intel_gt_sanitize: called after the GPU has lost power - * @i915: the i915 device + * @gt: the i915 GT container * @force: ignore a failed reset and sanitize engine state anyway * * Anytime we reset the GPU, either with an explicit GPU reset or through a @@ -104,24 +111,27 @@ static bool reset_engines(struct drm_i915_private *i915) * to match. Note that calling intel_gt_sanitize() if the GPU has not * been reset results in much confusion! 
*/ -void intel_gt_sanitize(struct drm_i915_private *i915, bool force) +void intel_gt_sanitize(struct intel_gt *gt, bool force) { struct intel_engine_cs *engine; enum intel_engine_id id; GEM_TRACE("\n"); - if (!reset_engines(i915) && !force) + intel_uc_sanitize(>->uc); + + if (!reset_engines(gt) && !force) return; - for_each_engine(engine, i915, id) - intel_engine_reset(engine, false); + for_each_engine(engine, gt->i915, id) + __intel_engine_reset(engine, false); } -void intel_gt_resume(struct drm_i915_private *i915) +int intel_gt_resume(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; + int err = 0; /* * After resume, we may need to poke into the pinned kernel @@ -129,15 +139,28 @@ void intel_gt_resume(struct drm_i915_private *i915) * Only the kernel contexts should remain pinned over suspend, * allowing us to fixup the user contexts on their first pin. */ - for_each_engine(engine, i915, id) { + intel_gt_pm_get(gt); + for_each_engine(engine, gt->i915, id) { struct intel_context *ce; + intel_engine_pm_get(engine); + ce = engine->kernel_context; if (ce) ce->ops->reset(ce); - ce = engine->preempt_context; - if (ce) - ce->ops->reset(ce); + engine->serial++; /* kernel context lost */ + err = engine->resume(engine); + + intel_engine_pm_put(engine); + if (err) { + dev_err(gt->i915->drm.dev, + "Failed to restart %s (%d)\n", + engine->name, err); + break; + } } + intel_gt_pm_put(gt); + + return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 7dd1130a19a4..ba960e1fc209 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -9,19 +9,19 @@ #include <linux/types.h> -struct drm_i915_private; +struct intel_gt; enum { INTEL_GT_UNPARK, INTEL_GT_PARK, }; -void intel_gt_pm_get(struct drm_i915_private *i915); -void intel_gt_pm_put(struct drm_i915_private *i915); +void intel_gt_pm_get(struct intel_gt *gt); +void intel_gt_pm_put(struct intel_gt *gt); -void intel_gt_pm_init(struct drm_i915_private *i915); +void intel_gt_pm_init_early(struct intel_gt *gt); -void intel_gt_sanitize(struct drm_i915_private *i915, bool force); -void intel_gt_resume(struct drm_i915_private *i915); +void intel_gt_sanitize(struct intel_gt *gt, bool force); +int intel_gt_resume(struct intel_gt *gt); #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h new file mode 100644 index 000000000000..34d4a868e4f1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_GT_TYPES__ +#define __INTEL_GT_TYPES__ + +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "uc/intel_uc.h" + +#include "i915_vma.h" +#include "intel_reset_types.h" +#include "intel_wakeref.h" + +struct drm_i915_private; +struct i915_ggtt; +struct intel_uncore; + +struct intel_hangcheck { + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ +#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) + + struct delayed_work work; +}; + +struct intel_gt { + struct drm_i915_private *i915; + struct intel_uncore *uncore; + struct i915_ggtt *ggtt; + + struct intel_uc uc; + + struct intel_gt_timelines { + struct mutex mutex; /* protects list */ + struct list_head active_list; + + /* Pack multiple 
timelines' seqnos into the same page */ + spinlock_t hwsp_lock; + struct list_head hwsp_free_list; + } timelines; + + struct list_head active_rings; + + struct intel_wakeref wakeref; + + struct list_head closed_vma; + spinlock_t closed_lock; /* guards the list of closed_vma */ + + struct intel_hangcheck hangcheck; + struct intel_reset reset; + + /** + * Is the GPU currently considered idle, or busy executing + * userspace requests? Whilst idle, we allow runtime power + * management to power down the hardware and display clocks. + * In order to reduce the effect on performance, there + * is a slight delay before we do so. + */ + intel_wakeref_t awake; + + struct blocking_notifier_head pm_notifications; + + ktime_t last_init_time; + + struct i915_vma *scratch; + + u32 pm_imr; + u32 pm_ier; + + u32 pm_guc_events; +}; + +enum intel_gt_scratch_field { + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_DEFAULT = 0, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256, + +}; + +#endif /* __INTEL_GT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 6bcfa6456c45..05d042cdefe2 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -22,8 +22,10 @@ * */ -#include "intel_reset.h" #include "i915_drv.h" +#include "intel_engine.h" +#include "intel_gt.h" +#include "intel_reset.h" struct hangcheck { u64 acthd; @@ -57,9 +59,6 @@ static bool subunits_stuck(struct intel_engine_cs *engine) int slice; int subslice; - if (engine->id != RCS0) - return true; - intel_engine_get_instdone(engine, &instdone); /* There might be unstable subunit states even when @@ -103,7 +102,6 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) static enum intel_engine_hangcheck_action engine_stuck(struct intel_engine_cs *engine, u64 acthd) { - struct drm_i915_private *dev_priv = engine->i915; enum intel_engine_hangcheck_action ha; u32 tmp; @@ -111,7 +109,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) if (ha != ENGINE_DEAD) return ha; - if (IS_GEN(dev_priv, 2)) + if (IS_GEN(engine->i915, 2)) return ENGINE_DEAD; /* Is the chip hanging on a WAIT_FOR_EVENT? @@ -121,8 +119,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = ENGINE_READ(engine, RING_CTL); if (tmp & RING_WAIT) { - i915_handle_error(dev_priv, engine->mask, 0, - "stuck wait on %s", engine->name); + intel_gt_handle_error(engine->gt, engine->mask, 0, + "stuck wait on %s", engine->name); ENGINE_WRITE(engine, RING_CTL, tmp); return ENGINE_WAIT_KICK; } @@ -222,7 +220,7 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, I915_ENGINE_WEDGED_TIMEOUT); } -static void hangcheck_declare_hang(struct drm_i915_private *i915, +static void hangcheck_declare_hang(struct intel_gt *gt, intel_engine_mask_t hung, intel_engine_mask_t stuck) { @@ -238,12 +236,12 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915, hung &= ~stuck; len = scnprintf(msg, sizeof(msg), "%s on ", stuck == hung ? 
"no progress" : "hang"); - for_each_engine_masked(engine, i915, hung, tmp) + for_each_engine_masked(engine, gt->i915, hung, tmp) len += scnprintf(msg + len, sizeof(msg) - len, "%s, ", engine->name); msg[len-2] = '\0'; - return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg); + return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg); } /* @@ -254,11 +252,10 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915, * we kick the ring. If we see no progress on three subsequent calls * we assume chip is wedged and try to fix it by resetting the chip. */ -static void i915_hangcheck_elapsed(struct work_struct *work) +static void hangcheck_elapsed(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), - gpu_error.hangcheck_work.work); + struct intel_gt *gt = + container_of(work, typeof(*gt), hangcheck.work.work); intel_engine_mask_t hung = 0, stuck = 0, wedged = 0; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -267,13 +264,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!i915_modparams.enable_hangcheck) return; - if (!READ_ONCE(dev_priv->gt.awake)) + if (!READ_ONCE(gt->awake)) return; - if (i915_terminally_wedged(dev_priv)) + if (intel_gt_is_wedged(gt)) return; - wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); + wakeref = intel_runtime_pm_get_if_in_use(>->i915->runtime_pm); if (!wakeref) return; @@ -281,9 +278,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) * periodically arm the mmio checker to see if we are triggering * any invalid access. */ - intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); + intel_uncore_arm_unclaimed_mmio_detection(gt->uncore); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, gt->i915, id) { struct hangcheck hc; intel_engine_signal_breadcrumbs(engine); @@ -305,7 +302,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (GEM_SHOW_DEBUG() && (hung | stuck)) { struct drm_printer p = drm_debug_printer("hangcheck"); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, gt->i915, id) { if (intel_engine_is_idle(engine)) continue; @@ -314,20 +311,37 @@ static void i915_hangcheck_elapsed(struct work_struct *work) } if (wedged) { - dev_err(dev_priv->drm.dev, + dev_err(gt->i915->drm.dev, "GPU recovery timed out," " cancelling all in-flight rendering.\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(gt); } if (hung) - hangcheck_declare_hang(dev_priv, hung, stuck); + hangcheck_declare_hang(gt, hung, stuck); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_runtime_pm_put(>->i915->runtime_pm, wakeref); /* Reset timer in case GPU hangs without another request being added */ - i915_queue_hangcheck(dev_priv); + intel_gt_queue_hangcheck(gt); +} + +void intel_gt_queue_hangcheck(struct intel_gt *gt) +{ + unsigned long delay; + + if (unlikely(!i915_modparams.enable_hangcheck)) + return; + + /* + * Don't continually defer the hangcheck so that it is always run at + * least once after work has been scheduled on any ring. Otherwise, + * we will ignore a hung ring if a second ring is kept busy. 
+ */ + + delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); + queue_delayed_work(system_long_wq, >->hangcheck.work, delay); } void intel_engine_init_hangcheck(struct intel_engine_cs *engine) @@ -336,10 +350,9 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine) engine->hangcheck.action_timestamp = jiffies; } -void intel_hangcheck_init(struct drm_i915_private *i915) +void intel_gt_init_hangcheck(struct intel_gt *gt) { - INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work, - i915_hangcheck_elapsed); + INIT_DELAYED_WORK(>->hangcheck.work, hangcheck_elapsed); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b42b5f158295..d9061d9348cb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -136,9 +136,9 @@ #include "gem/i915_gem_context.h" #include "i915_drv.h" -#include "i915_gem_render_state.h" #include "i915_vgpu.h" #include "intel_engine_pm.h" +#include "intel_gt.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" #include "intel_reset.h" @@ -161,6 +161,8 @@ #define GEN8_CTX_STATUS_COMPLETED_MASK \ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) +#define CTX_DESC_FORCE_RESTORE BIT_ULL(2) + /* Typical size of the average request (2 pipecontrols and a MI_BB) */ #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ #define WA_TAIL_DWORDS 2 @@ -221,6 +223,26 @@ static void execlists_init_reg_state(u32 *reg_state, struct intel_engine_cs *engine, struct intel_ring *ring); +static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_PREEMPT_ADDR); +} + +static inline void +ring_set_paused(const struct intel_engine_cs *engine, int state) +{ + /* + * We inspect HWS_PREEMPT with a semaphore inside + * engine->emit_fini_breadcrumb. If the dword is true, + * the ring is paused as the semaphore will busywait + * until the dword is false. + */ + engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; + if (state) + wmb(); +} + static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -236,6 +258,17 @@ static int effective_prio(const struct i915_request *rq) int prio = rq_prio(rq); /* + * If this request is special and must not be interrupted at any + * cost, so be it. Note we are only checking the most recent request + * in the context and so may be masking an earlier vip request. It + * is hoped that under the conditions where nopreempt is used, this + * will not matter (i.e. all requests to that context will be + * nopreempt for as long as desired). + */ + if (i915_request_has_nopreempt(rq)) + prio = I915_PRIORITY_UNPREEMPTABLE; + + /* * On unwinding the active request, we give it a priority bump * if it has completed waiting on any semaphore. 
If we know that * the request has already started, we can prevent an unwanted @@ -245,6 +278,7 @@ static int effective_prio(const struct i915_request *rq) prio |= I915_PRIORITY_NOSEMAPHORE; /* Restrict mere WAIT boosts from triggering preemption */ + BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */ return prio | __NO_PREEMPTION; } @@ -271,10 +305,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, { int last_prio; - if (!engine->preempt_context) - return false; - - if (i915_request_completed(rq)) + if (!intel_engine_has_semaphores(engine)) return false; /* @@ -338,9 +369,6 @@ __maybe_unused static inline bool assert_priority_queue(const struct i915_request *prev, const struct i915_request *next) { - const struct intel_engine_execlists *execlists = - &prev->engine->execlists; - /* * Without preemption, the prev may refer to the still active element * which we refuse to let go. @@ -348,7 +376,7 @@ assert_priority_queue(const struct i915_request *prev, * Even with preemption, there are times when we think it is better not * to preempt and leave an ostensibly lower priority request in flight. */ - if (port_request(execlists->port) == prev) + if (i915_request_is_active(prev)) return true; return rq_prio(prev) >= rq_prio(next); @@ -442,13 +470,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) struct intel_engine_cs *owner; if (i915_request_completed(rq)) - break; + continue; /* XXX */ __i915_request_unsubmit(rq); unwind_wa_tail(rq); - GEM_BUG_ON(rq->hw_context->inflight); - /* * Push the request back into the queue for later resubmission. * If this request is not native to this physical engine (i.e. @@ -468,6 +494,19 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_move(&rq->sched.link, pl); active = rq; } else { + /* + * Decouple the virtual breadcrumb before moving it + * back to the virtual engine -- we don't want the + * request to complete in the background and try + * and cancel the breadcrumb on the virtual engine + * (instead of the old engine where it is linked)! 
+ */ + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &rq->fence.flags)) { + spin_lock(&rq->lock); + i915_request_cancel_breadcrumb(rq); + spin_unlock(&rq->lock); + } rq->engine = owner; owner->submit_request(rq); active = NULL; @@ -500,32 +539,32 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status) status, rq); } -inline void -execlists_user_begin(struct intel_engine_execlists *execlists, - const struct execlist_port *port) +static inline struct i915_request * +execlists_schedule_in(struct i915_request *rq, int idx) { - execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER); -} + struct intel_context *ce = rq->hw_context; + int count; -inline void -execlists_user_end(struct intel_engine_execlists *execlists) -{ - execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); -} + trace_i915_request_in(rq, idx); -static inline void -execlists_context_schedule_in(struct i915_request *rq) -{ - GEM_BUG_ON(rq->hw_context->inflight); + count = intel_context_inflight_count(ce); + if (!count) { + intel_context_get(ce); + ce->inflight = rq->engine; + + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); + intel_engine_context_in(ce->inflight); + } + + intel_context_inflight_inc(ce); + GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); - execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); - intel_engine_context_in(rq->engine); - rq->hw_context->inflight = rq->engine; + return i915_request_get(rq); } -static void kick_siblings(struct i915_request *rq) +static void kick_siblings(struct i915_request *rq, struct intel_context *ce) { - struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine); + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); struct i915_request *next = READ_ONCE(ve->request); if (next && next->execution_mask & ~rq->execution_mask) @@ -533,29 +572,42 @@ static void kick_siblings(struct i915_request *rq) } static inline void -execlists_context_schedule_out(struct i915_request *rq, unsigned long status) +execlists_schedule_out(struct i915_request *rq) { - rq->hw_context->inflight = NULL; - intel_engine_context_out(rq->engine); - execlists_context_status_change(rq, status); + struct intel_context *ce = rq->hw_context; + + GEM_BUG_ON(!intel_context_inflight_count(ce)); + trace_i915_request_out(rq); - /* - * If this is part of a virtual engine, its next request may have - * been blocked waiting for access to the active context. We have - * to kick all the siblings again in case we need to switch (e.g. - * the next request is not runnable on this engine). Hopefully, - * we will already have submitted the next request before the - * tasklet runs and do not need to rebuild each virtual tree - * and kick everyone again. - */ - if (rq->engine != rq->hw_context->engine) - kick_siblings(rq); + intel_context_inflight_dec(ce); + if (!intel_context_inflight_count(ce)) { + intel_engine_context_out(ce->inflight); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); + + /* + * If this is part of a virtual engine, its next request may + * have been blocked waiting for access to the active context. + * We have to kick all the siblings again in case we need to + * switch (e.g. the next request is not runnable on this + * engine). Hopefully, we will already have submitted the next + * request before the tasklet runs and do not need to rebuild + * each virtual tree and kick everyone again. 
+ */ + ce->inflight = NULL; + if (rq->engine != ce->engine) + kick_siblings(rq, ce); + + intel_context_put(ce); + } + + i915_request_put(rq); } -static u64 execlists_update_context(struct i915_request *rq) +static u64 execlists_update_context(const struct i915_request *rq) { struct intel_context *ce = rq->hw_context; + u64 desc; ce->lrc_reg_state[CTX_RING_TAIL + 1] = intel_ring_set_tail(rq->ring, rq->tail); @@ -576,7 +628,11 @@ static u64 execlists_update_context(struct i915_request *rq) * wmb). */ mb(); - return ce->lrc_desc; + + desc = ce->lrc_desc; + ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; + + return desc; } static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) @@ -590,12 +646,62 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc } } +static __maybe_unused void +trace_ports(const struct intel_engine_execlists *execlists, + const char *msg, + struct i915_request * const *ports) +{ + const struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + + GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n", + engine->name, msg, + ports[0]->fence.context, + ports[0]->fence.seqno, + i915_request_completed(ports[0]) ? "!" : + i915_request_started(ports[0]) ? "*" : + "", + ports[1] ? ports[1]->fence.context : 0, + ports[1] ? ports[1]->fence.seqno : 0); +} + +static __maybe_unused bool +assert_pending_valid(const struct intel_engine_execlists *execlists, + const char *msg) +{ + struct i915_request * const *port, *rq; + struct intel_context *ce = NULL; + + trace_ports(execlists, msg, execlists->pending); + + if (execlists->pending[execlists_num_ports(execlists)]) + return false; + + for (port = execlists->pending; (rq = *port); port++) { + if (ce == rq->hw_context) + return false; + + ce = rq->hw_context; + if (i915_request_completed(rq)) + continue; + + if (i915_active_is_idle(&ce->active)) + return false; + + if (!i915_vma_is_pinned(ce->state)) + return false; + } + + return ce; +} + static void execlists_submit_ports(struct intel_engine_cs *engine) { struct intel_engine_execlists *execlists = &engine->execlists; - struct execlist_port *port = execlists->port; unsigned int n; + GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); + /* * We can skip acquiring intel_runtime_pm_get() here as it was taken * on our behalf by the request (see i915_gem_mark_busy()) and it will @@ -604,7 +710,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) * that all ELSP are drained i.e. we have processed the CSB, * before allowing ourselves to idle and calling intel_runtime_pm_put(). */ - GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref)); + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); /* * ELSQ note: the submit queue is not cleared after being submitted @@ -613,38 +719,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) * of elsq entries, keep this in mind before changing the loop below. 
*/ for (n = execlists_num_ports(execlists); n--; ) { - struct i915_request *rq; - unsigned int count; - u64 desc; + struct i915_request *rq = execlists->pending[n]; - rq = port_unpack(&port[n], &count); - if (rq) { - GEM_BUG_ON(count > !n); - if (!count++) - execlists_context_schedule_in(rq); - port_set(&port[n], port_pack(rq, count)); - desc = execlists_update_context(rq); - GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); - - GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", - engine->name, n, - port[n].context_id, count, - rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq), - rq_prio(rq)); - } else { - GEM_BUG_ON(!n); - desc = 0; - } - - write_desc(execlists, desc, n); + write_desc(execlists, + rq ? execlists_update_context(rq) : 0, + n); } /* we need to manually load the submit queue */ if (execlists->ctrl_reg) writel(EL_CTRL_LOAD, execlists->ctrl_reg); - - execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); } static bool ctx_single_port_submission(const struct intel_context *ce) @@ -668,6 +752,7 @@ static bool can_merge_ctx(const struct intel_context *prev, static bool can_merge_rq(const struct i915_request *prev, const struct i915_request *next) { + GEM_BUG_ON(prev == next); GEM_BUG_ON(!assert_priority_queue(prev, next)); if (!can_merge_ctx(prev->hw_context, next->hw_context)) @@ -676,58 +761,6 @@ static bool can_merge_rq(const struct i915_request *prev, return true; } -static void port_assign(struct execlist_port *port, struct i915_request *rq) -{ - GEM_BUG_ON(rq == port_request(port)); - - if (port_isset(port)) - i915_request_put(port_request(port)); - - port_set(port, port_pack(i915_request_get(rq), port_count(port))); -} - -static void inject_preempt_context(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists *execlists = &engine->execlists; - struct intel_context *ce = engine->preempt_context; - unsigned int n; - - GEM_BUG_ON(execlists->preempt_complete_status != - upper_32_bits(ce->lrc_desc)); - - /* - * Switch to our empty preempt context so - * the state of the GPU is known (idle). - */ - GEM_TRACE("%s\n", engine->name); - for (n = execlists_num_ports(execlists); --n; ) - write_desc(execlists, 0, n); - - write_desc(execlists, ce->lrc_desc, n); - - /* we need to manually load the submit queue */ - if (execlists->ctrl_reg) - writel(EL_CTRL_LOAD, execlists->ctrl_reg); - - execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); - execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); - - (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); -} - -static void complete_preempt_context(struct intel_engine_execlists *execlists) -{ - GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); - - if (inject_preempt_hang(execlists)) - return; - - execlists_cancel_port_requests(execlists); - __unwind_incomplete_requests(container_of(execlists, - struct intel_engine_cs, - execlists)); -} - static void virtual_update_register_offsets(u32 *regs, struct intel_engine_cs *engine) { @@ -792,7 +825,7 @@ static bool virtual_matches(const struct virtual_engine *ve, * we reuse the register offsets). This is a very small * hystersis on the greedy seelction algorithm. 
*/ - inflight = READ_ONCE(ve->context.inflight); + inflight = intel_context_inflight(&ve->context); if (inflight && inflight != engine) return false; @@ -815,13 +848,108 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, spin_unlock(&old->breadcrumbs.irq_lock); } +static struct i915_request * +last_active(const struct intel_engine_execlists *execlists) +{ + struct i915_request * const *last = execlists->active; + + while (*last && i915_request_completed(*last)) + last++; + + return *last; +} + +static void defer_request(struct i915_request *rq, struct list_head * const pl) +{ + LIST_HEAD(list); + + /* + * We want to move the interrupted request to the back of + * the round-robin list (i.e. its priority level), but + * in doing so, we must then move all requests that were in + * flight and were waiting for the interrupted request to + * be run after it again. + */ + do { + struct i915_dependency *p; + + GEM_BUG_ON(i915_request_is_active(rq)); + list_move_tail(&rq->sched.link, pl); + + list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + struct i915_request *w = + container_of(p->waiter, typeof(*w), sched); + + /* Leave semaphores spinning on the other engines */ + if (w->engine != rq->engine) + continue; + + /* No waiter should start before its signaler */ + GEM_BUG_ON(i915_request_started(w) && + !i915_request_completed(rq)); + + GEM_BUG_ON(i915_request_is_active(w)); + if (list_empty(&w->sched.link)) + continue; /* Not yet submitted; unready */ + + if (rq_prio(w) < rq_prio(rq)) + continue; + + GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); + list_move_tail(&w->sched.link, &list); + } + + rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); + } while (rq); +} + +static void defer_active(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = __unwind_incomplete_requests(engine); + if (!rq) + return; + + defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); +} + +static bool +need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) +{ + int hint; + + if (!intel_engine_has_semaphores(engine)) + return false; + + if (list_is_last(&rq->sched.link, &engine->active.requests)) + return false; + + hint = max(rq_prio(list_next_entry(rq, sched.link)), + engine->execlists.queue_priority_hint); + + return hint >= effective_prio(rq); +} + +static bool +enable_timeslice(struct intel_engine_cs *engine) +{ + struct i915_request *last = last_active(&engine->execlists); + + return last && need_timeslice(engine, last); +} + +static void record_preemption(struct intel_engine_execlists *execlists) +{ + (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - const struct execlist_port * const last_port = - &execlists->port[execlists->port_mask]; - struct i915_request *last = port_request(port); + struct i915_request **port = execlists->pending; + struct i915_request ** const last_port = port + execlists->port_mask; + struct i915_request *last; struct rb_node *rb; bool submit = false; @@ -867,65 +995,100 @@ static void execlists_dequeue(struct intel_engine_cs *engine) break; } + /* + * If the queue is higher priority than the last + * request in the currently active context, submit afresh. + * We will resubmit again afterwards in case we need to split + * the active context to interject the preemption request, + * i.e. 
we will retrigger preemption following the ack in case + * of trouble. + */ + last = last_active(execlists); if (last) { - /* - * Don't resubmit or switch until all outstanding - * preemptions (lite-restore) are seen. Then we - * know the next preemption status we see corresponds - * to this ELSP update. - */ - GEM_BUG_ON(!execlists_is_active(execlists, - EXECLISTS_ACTIVE_USER)); - GEM_BUG_ON(!port_count(&port[0])); + if (need_preempt(engine, last, rb)) { + GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n", + engine->name, + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); + record_preemption(execlists); - /* - * If we write to ELSP a second time before the HW has had - * a chance to respond to the previous write, we can confuse - * the HW and hit "undefined behaviour". After writing to ELSP, - * we must then wait until we see a context-switch event from - * the HW to indicate that it has had a chance to respond. - */ - if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) - return; + /* + * Don't let the RING_HEAD advance past the breadcrumb + * as we unwind (and until we resubmit) so that we do + * not accidentally tell it to go backwards. + */ + ring_set_paused(engine, 1); - if (need_preempt(engine, last, rb)) { - inject_preempt_context(engine); - return; - } + /* + * Note that we have not stopped the GPU at this point, + * so we are unwinding the incomplete requests as they + * remain inflight and so by the time we do complete + * the preemption, some of the unwound requests may + * complete! + */ + __unwind_incomplete_requests(engine); - /* - * In theory, we could coalesce more requests onto - * the second port (the first port is active, with - * no preemptions pending). However, that means we - * then have to deal with the possible lite-restore - * of the second port (as we submit the ELSP, there - * may be a context-switch) but also we may complete - * the resubmission before the context-switch. Ergo, - * coalescing onto the second port will cause a - * preemption event, but we cannot predict whether - * that will affect port[0] or port[1]. - * - * If the second port is already active, we can wait - * until the next context-switch before contemplating - * new requests. The GPU will be busy and we should be - * able to resubmit the new ELSP before it idles, - * avoiding pipeline bubbles (momentary pauses where - * the driver is unable to keep up the supply of new - * work). However, we have to double check that the - * priorities of the ports haven't been switch. - */ - if (port_count(&port[1])) - return; + /* + * If we need to return to the preempted context, we + * need to skip the lite-restore and force it to + * reload the RING_TAIL. Otherwise, the HW has a + * tendency to ignore us rewinding the TAIL to the + * end of an earlier request. + */ + last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; + last = NULL; + } else if (need_timeslice(engine, last) && + !timer_pending(&engine->execlists.timer)) { + GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n", + engine->name, + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); - /* - * WaIdleLiteRestore:bdw,skl - * Apply the wa NOOPs to prevent - * ring:HEAD == rq:TAIL as we resubmit the - * request. See gen8_emit_fini_breadcrumb() for - * where we prepare the padding after the - * end of the request. 
- */ - last->tail = last->wa_tail; + ring_set_paused(engine, 1); + defer_active(engine); + + /* + * Unlike for preemption, if we rewind and continue + * executing the same context as previously active, + * the order of execution will remain the same and + * the tail will only advance. We do not need to + * force a full context restore, as a lite-restore + * is sufficient to resample the monotonic TAIL. + * + * If we switch to any other context, similarly we + * will not rewind TAIL of current context, and + * normal save/restore will preserve state and allow + * us to later continue executing the same request. + */ + last = NULL; + } else { + /* + * Otherwise if we already have a request pending + * for execution after the current one, we can + * just wait until the next CS event before + * queuing more. In either case we will force a + * lite-restore preemption event, but if we wait + * we hopefully coalesce several updates into a single + * submission. + */ + if (!list_is_last(&last->sched.link, + &engine->active.requests)) + return; + + /* + * WaIdleLiteRestore:bdw,skl + * Apply the wa NOOPs to prevent + * ring:HEAD == rq:TAIL as we resubmit the + * request. See gen8_emit_fini_breadcrumb() for + * where we prepare the padding after the + * end of the request. + */ + last->tail = last->wa_tail; + } } while (rb) { /* XXX virtual is always taking precedence */ @@ -955,9 +1118,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine) continue; } + if (i915_request_completed(rq)) { + ve->request = NULL; + ve->base.execlists.queue_priority_hint = INT_MIN; + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + + rq->engine = engine; + __i915_request_submit(rq); + + spin_unlock(&ve->base.active.lock); + + rb = rb_first_cached(&execlists->virtual); + continue; + } + if (last && !can_merge_rq(last, rq)) { spin_unlock(&ve->base.active.lock); - return; /* leave this rq for another engine */ + return; /* leave this for another */ } GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n", @@ -1006,9 +1184,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } __i915_request_submit(rq); - trace_i915_request_in(rq, port_index(port, execlists)); - submit = true; - last = rq; + if (!i915_request_completed(rq)) { + submit = true; + last = rq; + } } spin_unlock(&ve->base.active.lock); @@ -1021,6 +1200,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { + if (i915_request_completed(rq)) + goto skip; + /* * Can we combine this request with the current port? * It has to be the same context/ringbuffer and not @@ -1060,19 +1242,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ctx_single_port_submission(rq->hw_context)) goto done; - - if (submit) - port_assign(port, last); + *port = execlists_schedule_in(last, port - execlists->pending); port++; - - GEM_BUG_ON(port_isset(port)); } - __i915_request_submit(rq); - trace_i915_request_in(rq, port_index(port, execlists)); - last = rq; submit = true; +skip: + __i915_request_submit(rq); } rb_erase_cached(&p->node, &execlists->queue); @@ -1097,54 +1274,32 @@ done: * interrupt for secondary ports). 
*/ execlists->queue_priority_hint = queue_prio(execlists); + GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n", + engine->name, execlists->queue_priority_hint, + yesno(submit)); if (submit) { - port_assign(port, last); + *port = execlists_schedule_in(last, port - execlists->pending); + memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists_submit_ports(engine); + } else { + ring_set_paused(engine, 0); } - - /* We must always keep the beast fed if we have work piled up */ - GEM_BUG_ON(rb_first_cached(&execlists->queue) && - !port_isset(execlists->port)); - - /* Re-evaluate the executing context setup after each preemptive kick */ - if (last) - execlists_user_begin(execlists, execlists->port); - - /* If the engine is now idle, so should be the flag; and vice versa. */ - GEM_BUG_ON(execlists_is_active(&engine->execlists, - EXECLISTS_ACTIVE_USER) == - !port_isset(engine->execlists.port)); } void execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) { - struct execlist_port *port = execlists->port; - unsigned int num_ports = execlists_num_ports(execlists); - - while (num_ports-- && port_isset(port)) { - struct i915_request *rq = port_request(port); - - GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n", - rq->engine->name, - (unsigned int)(port - execlists->port), - rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq)); - - GEM_BUG_ON(!execlists->active); - execlists_context_schedule_out(rq, - i915_request_completed(rq) ? - INTEL_CONTEXT_SCHEDULE_OUT : - INTEL_CONTEXT_SCHEDULE_PREEMPTED); + struct i915_request * const *port, *rq; - i915_request_put(rq); + for (port = execlists->pending; (rq = *port); port++) + execlists_schedule_out(rq); + memset(execlists->pending, 0, sizeof(execlists->pending)); - memset(port, 0, sizeof(*port)); - port++; - } - - execlists_clear_all_active(execlists); + for (port = execlists->active; (rq = *port); port++) + execlists_schedule_out(rq); + execlists->active = + memset(execlists->inflight, 0, sizeof(execlists->inflight)); } static inline void @@ -1160,10 +1315,33 @@ reset_in_progress(const struct intel_engine_execlists *execlists) return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); } +enum csb_step { + CSB_NOP, + CSB_PROMOTE, + CSB_PREEMPT, + CSB_COMPLETE, +}; + +static inline enum csb_step +csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) +{ + unsigned int status = *csb; + + if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) + return CSB_PROMOTE; + + if (status & GEN8_CTX_STATUS_PREEMPTED) + return CSB_PREEMPT; + + if (*execlists->active) + return CSB_COMPLETE; + + return CSB_NOP; +} + static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; const u32 * const buf = execlists->csb_status; const u8 num_entries = execlists->csb_size; u8 head, tail; @@ -1198,10 +1376,6 @@ static void process_csb(struct intel_engine_cs *engine) rmb(); do { - struct i915_request *rq; - unsigned int status; - unsigned int count; - if (++head == num_entries) head = 0; @@ -1223,68 +1397,39 @@ static void process_csb(struct intel_engine_cs *engine) * status notifier. 
*/ - GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", + GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n", engine->name, head, - buf[2 * head + 0], buf[2 * head + 1], - execlists->active); - - status = buf[2 * head]; - if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | - GEN8_CTX_STATUS_PREEMPTED)) - execlists_set_active(execlists, - EXECLISTS_ACTIVE_HWACK); - if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) - execlists_clear_active(execlists, - EXECLISTS_ACTIVE_HWACK); - - if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) - continue; + buf[2 * head + 0], buf[2 * head + 1]); - /* We should never get a COMPLETED | IDLE_ACTIVE! */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); + switch (csb_parse(execlists, buf + 2 * head)) { + case CSB_PREEMPT: /* cancel old inflight, prepare for switch */ + trace_ports(execlists, "preempted", execlists->active); - if (status & GEN8_CTX_STATUS_COMPLETE && - buf[2*head + 1] == execlists->preempt_complete_status) { - GEM_TRACE("%s preempt-idle\n", engine->name); - complete_preempt_context(execlists); - continue; - } + while (*execlists->active) + execlists_schedule_out(*execlists->active++); - if (status & GEN8_CTX_STATUS_PREEMPTED && - execlists_is_active(execlists, - EXECLISTS_ACTIVE_PREEMPT)) - continue; + /* fallthrough */ + case CSB_PROMOTE: /* switch pending to inflight */ + GEM_BUG_ON(*execlists->active); + GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); + execlists->active = + memcpy(execlists->inflight, + execlists->pending, + execlists_num_ports(execlists) * + sizeof(*execlists->pending)); + execlists->pending[0] = NULL; - GEM_BUG_ON(!execlists_is_active(execlists, - EXECLISTS_ACTIVE_USER)); + trace_ports(execlists, "promoted", execlists->active); - rq = port_unpack(port, &count); - GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", - engine->name, - port->context_id, count, - rq ? rq->fence.context : 0, - rq ? rq->fence.seqno : 0, - rq ? hwsp_seqno(rq) : 0, - rq ? rq_prio(rq) : 0); + if (enable_timeslice(engine)) + mod_timer(&execlists->timer, jiffies + 1); - /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); + if (!inject_preempt_hang(execlists)) + ring_set_paused(engine, 0); + break; - GEM_BUG_ON(count == 0); - if (--count == 0) { - /* - * On the final event corresponding to the - * submission of this context, we expect either - * an element-switch event or a completion - * event (and on completion, the active-idle - * marker). No more preemptions, lite-restore - * or otherwise. - */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); - GEM_BUG_ON(port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); - GEM_BUG_ON(!port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); + case CSB_COMPLETE: /* port0 completed, advanced to port1 */ + trace_ports(execlists, "completed", execlists->active); /* * We rely on the hardware being strongly @@ -1292,22 +1437,16 @@ static void process_csb(struct intel_engine_cs *engine) * coherent (visible from the CPU) before the * user interrupt and CSB is processed. 
*/ - GEM_BUG_ON(!i915_request_completed(rq)); + GEM_BUG_ON(!i915_request_completed(*execlists->active) && + !reset_in_progress(execlists)); + execlists_schedule_out(*execlists->active++); - execlists_context_schedule_out(rq, - INTEL_CONTEXT_SCHEDULE_OUT); - i915_request_put(rq); - - GEM_TRACE("%s completed ctx=%d\n", - engine->name, port->context_id); + GEM_BUG_ON(execlists->active - execlists->inflight > + execlists_num_ports(execlists)); + break; - port = execlists_port_complete(execlists, port); - if (port_isset(port)) - execlists_user_begin(execlists, port); - else - execlists_user_end(execlists); - } else { - port_set(port, port_pack(rq, count)); + case CSB_NOP: + break; } } while (head != tail); @@ -1332,7 +1471,7 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) lockdep_assert_held(&engine->active.lock); process_csb(engine); - if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) + if (!engine->execlists.pending[0]) execlists_dequeue(engine); } @@ -1345,16 +1484,20 @@ static void execlists_submission_tasklet(unsigned long data) struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; unsigned long flags; - GEM_TRACE("%s awake?=%d, active=%x\n", - engine->name, - !!intel_wakeref_active(&engine->wakeref), - engine->execlists.active); - spin_lock_irqsave(&engine->active.lock, flags); __execlists_submission_tasklet(engine); spin_unlock_irqrestore(&engine->active.lock, flags); } +static void execlists_submission_timer(struct timer_list *timer) +{ + struct intel_engine_cs *engine = + from_timer(engine, timer, execlists.timer); + + /* Kick the tasklet for some interrupt coalescing and reset handling */ + tasklet_hi_schedule(&engine->execlists.tasklet); +} + static void queue_request(struct intel_engine_cs *engine, struct i915_sched_node *node, int prio) @@ -1376,12 +1519,16 @@ static void __submit_queue_imm(struct intel_engine_cs *engine) tasklet_hi_schedule(&execlists->tasklet); } -static void submit_queue(struct intel_engine_cs *engine, int prio) +static void submit_queue(struct intel_engine_cs *engine, + const struct i915_request *rq) { - if (prio > engine->execlists.queue_priority_hint) { - engine->execlists.queue_priority_hint = prio; - __submit_queue_imm(engine); - } + struct intel_engine_execlists *execlists = &engine->execlists; + + if (rq_prio(rq) <= execlists->queue_priority_hint) + return; + + execlists->queue_priority_hint = rq_prio(rq); + __submit_queue_imm(engine); } static void execlists_submit_request(struct i915_request *request) @@ -1397,7 +1544,7 @@ static void execlists_submit_request(struct i915_request *request) GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); GEM_BUG_ON(list_empty(&request->sched.link)); - submit_queue(engine, rq_prio(request)); + submit_queue(engine, request); spin_unlock_irqrestore(&engine->active.lock, flags); } @@ -1405,20 +1552,20 @@ static void execlists_submit_request(struct i915_request *request) static void __execlists_context_fini(struct intel_context *ce) { intel_ring_put(ce->ring); - - GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); - i915_gem_object_put(ce->state->obj); + i915_vma_put(ce->state); } static void execlists_context_destroy(struct kref *kref) { struct intel_context *ce = container_of(kref, typeof(*ce), ref); + GEM_BUG_ON(!i915_active_is_idle(&ce->active)); GEM_BUG_ON(intel_context_is_pinned(ce)); if (ce->state) __execlists_context_fini(ce); + intel_context_fini(ce); intel_context_free(ce); } @@ -1426,7 +1573,6 @@ static void 
execlists_context_unpin(struct intel_context *ce) { i915_gem_context_unpin_hw_id(ce->gem_context); i915_gem_object_unpin_map(ce->state->obj); - intel_ring_unpin(ce->ring); } static void @@ -1444,9 +1590,12 @@ __execlists_update_reg_state(struct intel_context *ce, regs[CTX_RING_TAIL + 1] = ring->tail; /* RPCS */ - if (engine->class == RENDER_CLASS) + if (engine->class == RENDER_CLASS) { regs[CTX_R_PWR_CLK_STATE + 1] = intel_sseu_make_rpcs(engine->i915, &ce->sseu); + + i915_oa_init_reg_state(engine, ce, regs); + } } static int @@ -1456,19 +1605,15 @@ __execlists_context_pin(struct intel_context *ce, void *vaddr; int ret; - GEM_BUG_ON(!ce->gem_context->vm); - ret = execlists_context_deferred_alloc(ce, engine); if (ret) goto err; GEM_BUG_ON(!ce->state); - ret = intel_context_active_acquire(ce, - engine->i915->ggtt.pin_bias | - PIN_OFFSET_BIAS | - PIN_HIGH); + ret = intel_context_active_acquire(ce); if (ret) goto err; + GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); vaddr = i915_gem_object_pin_map(ce->state->obj, i915_coherent_map_type(engine->i915) | @@ -1478,13 +1623,9 @@ __execlists_context_pin(struct intel_context *ce, goto unpin_active; } - ret = intel_ring_pin(ce->ring); - if (ret) - goto unpin_map; - ret = i915_gem_context_pin_hw_id(ce->gem_context); if (ret) - goto unpin_ring; + goto unpin_map; ce->lrc_desc = lrc_descriptor(ce, engine); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; @@ -1492,8 +1633,6 @@ __execlists_context_pin(struct intel_context *ce, return 0; -unpin_ring: - intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_active: @@ -1575,8 +1714,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) static int emit_pdps(struct i915_request *rq) { const struct intel_engine_cs * const engine = rq->engine; - struct i915_ppgtt * const ppgtt = - i915_vm_to_ppgtt(rq->gem_context->vm); + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm); int err, i; u32 *cs; @@ -1649,7 +1787,7 @@ static int execlists_request_alloc(struct i915_request *request) */ /* Unconditionally invalidate GPU caches and TLBs. */ - if (i915_vm_is_4lvl(request->gem_context->vm)) + if (i915_vm_is_4lvl(request->hw_context->vm)) ret = request->engine->emit_flush(request, EMIT_INVALIDATE); else ret = emit_pdps(request); @@ -1682,7 +1820,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) /* NB no one else is allowed to scribble over scratch + 256! 
*/ *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = i915_scratch_offset(engine->i915) + 256; + *batch++ = intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); *batch++ = 0; *batch++ = MI_LOAD_REGISTER_IMM(1); @@ -1696,12 +1835,19 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = i915_scratch_offset(engine->i915) + 256; + *batch++ = intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); *batch++ = 0; return batch; } +static u32 slm_offset(struct intel_engine_cs *engine) +{ + return intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA); +} + /* * Typically we only have one indirect_ctx and per_ctx batch buffer which are * initialized at the beginning and shared across all contexts but this field @@ -1733,8 +1879,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE, - i915_scratch_offset(engine->i915) + - 2 * CACHELINE_BYTES); + slm_offset(engine)); *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -1880,7 +2025,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) if (IS_ERR(obj)) return PTR_ERR(obj); - vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -1976,22 +2121,23 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) static void enable_execlists(struct intel_engine_cs *engine) { + u32 mode; + + assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); + intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ if (INTEL_GEN(engine->i915) >= 11) - ENGINE_WRITE(engine, - RING_MODE_GEN7, - _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE)); + mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); else - ENGINE_WRITE(engine, - RING_MODE_GEN7, - _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); + mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); + ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); - ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); - ENGINE_WRITE(engine, - RING_HWS_PGA, - i915_ggtt_offset(engine->status_page.vma)); + ENGINE_WRITE_FW(engine, + RING_HWS_PGA, + i915_ggtt_offset(engine->status_page.vma)); ENGINE_POSTING_READ(engine, RING_HWS_PGA); } @@ -1999,7 +2145,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) { bool unexpected = false; - if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) { + if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); unexpected = true; } @@ -2047,34 +2193,32 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) __tasklet_disable_sync_once(&execlists->tasklet); GEM_BUG_ON(!reset_in_progress(execlists)); - intel_engine_stop_cs(engine); - /* And flush any current direct submission. 
*/ spin_lock_irqsave(&engine->active.lock, flags); spin_unlock_irqrestore(&engine->active.lock, flags); -} - -static bool lrc_regs_ok(const struct i915_request *rq) -{ - const struct intel_ring *ring = rq->ring; - const u32 *regs = rq->hw_context->lrc_reg_state; - - /* Quick spot check for the common signs of context corruption */ - - if (regs[CTX_RING_BUFFER_CONTROL + 1] != - (RING_CTL_SIZE(ring->size) | RING_VALID)) - return false; - if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma)) - return false; - - return true; + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. + * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * FIXME: Wa for more modern gens needs to be validated + */ + intel_engine_stop_cs(engine); } -static void reset_csb_pointers(struct intel_engine_execlists *execlists) +static void reset_csb_pointers(struct intel_engine_cs *engine) { + struct intel_engine_execlists * const execlists = &engine->execlists; const unsigned int reset_value = execlists->csb_size - 1; + ring_set_paused(engine, 0); + /* * After a reset, the HW starts writing into CSB entry [0]. We * therefore have to set our HEAD pointer back one entry so that @@ -2121,18 +2265,21 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) process_csb(engine); /* drain preemption events */ /* Following the reset, we need to reload the CSB read/write pointers */ - reset_csb_pointers(&engine->execlists); + reset_csb_pointers(engine); /* * Save the currently executing context, even if we completed * its request, it was still running at the time of the * reset and will have been clobbered. */ - if (!port_isset(execlists->port)) - goto out_clear; + rq = execlists_active(execlists); + if (!rq) + goto unwind; - rq = port_request(execlists->port); ce = rq->hw_context; + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); + rq = active_request(rq); /* * Catch up with any missed context-switch interrupts. @@ -2145,9 +2292,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) */ execlists_cancel_port_requests(execlists); - rq = active_request(rq); - if (!rq) + if (!rq) { + ce->ring->head = ce->ring->tail; goto out_replay; + } + + ce->ring->head = intel_ring_wrap(ce->ring, rq->head); /* * If this request hasn't started yet, e.g. it is waiting on a @@ -2161,7 +2311,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * Otherwise, if we have not started yet, the request should replay * perfectly and we do not need to flag the result as being erroneous. */ - if (!i915_request_started(rq) && lrc_regs_ok(rq)) + if (!i915_request_started(rq)) goto out_replay; /* @@ -2175,8 +2325,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * and have to at least restore the RING register in the context * image back to the expected values to skip over the guilty request. 
*/ - i915_reset_request(rq, stalled); - if (!stalled && lrc_regs_ok(rq)) + __i915_request_reset(rq, stalled); + if (!stalled) goto out_replay; /* @@ -2196,17 +2346,14 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) execlists_init_reg_state(regs, ce, engine, ce->ring); out_replay: - /* Rerun the request; its payload has been neutered (if guilty). */ - ce->ring->head = - rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail; + GEM_TRACE("%s replay {head:%04x, tail:%04x\n", + engine->name, ce->ring->head, ce->ring->tail); intel_ring_update_space(ce->ring); __execlists_update_reg_state(ce, engine); +unwind: /* Push back any incomplete requests for replay after the reset. */ __unwind_incomplete_requests(engine); - -out_clear: - execlists_clear_all_active(execlists); } static void execlists_reset(struct intel_engine_cs *engine, bool stalled) @@ -2302,7 +2449,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; - GEM_BUG_ON(port_isset(execlists->port)); GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); execlists->tasklet.func = nop_submission_tasklet; @@ -2440,7 +2586,8 @@ static int gen8_emit_flush_render(struct i915_request *request, { struct intel_engine_cs *engine = request->engine; u32 scratch_addr = - i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES; + intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); bool vf_flush_wa = false, dc_flush_wa = false; u32 *cs, flags = 0; int len; @@ -2520,15 +2667,30 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) return cs; } +static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = intel_hws_preempt_address(request->engine); + *cs++ = 0; + + return cs; +} + static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write(cs, request->fence.seqno, request->timeline->hwsp_offset, 0); - *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(request->engine)) + cs = emit_preempt_busywait(request, cs); request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); @@ -2549,9 +2711,11 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL, 0); - *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(request->engine)) + cs = emit_preempt_busywait(request, cs); request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); @@ -2559,27 +2723,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) return gen8_emit_wa_tail(request, cs); } -static int gen8_init_rcs_context(struct i915_request *rq) -{ - int ret; - - ret = intel_engine_emit_ctx_wa(rq); - if (ret) - return ret; - - ret = intel_rcs_context_init_mocs(rq); - /* - * Failing to program the MOCS is non-fatal.The system will not - * run at peak performance. So generate an error and carry on. 
- */ - if (ret) - DRM_ERROR("MOCS failed to program: expect performance issues.\n"); - - return i915_gem_render_state_emit(rq); -} - static void execlists_park(struct intel_engine_cs *engine) { + del_timer_sync(&engine->execlists.timer); intel_engine_park(engine); } @@ -2598,11 +2744,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->unpark = NULL; engine->flags |= I915_ENGINE_SUPPORTS_STATS; - if (!intel_vgpu_active(engine->i915)) + if (!intel_vgpu_active(engine->i915)) { engine->flags |= I915_ENGINE_HAS_SEMAPHORES; - if (engine->preempt_context && - HAS_LOGICAL_RING_PREEMPTION(engine->i915)) - engine->flags |= I915_ENGINE_HAS_PREEMPTION; + if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) + engine->flags |= I915_ENGINE_HAS_PREEMPTION; + } } static void execlists_destroy(struct intel_engine_cs *engine) @@ -2678,12 +2824,12 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) tasklet_init(&engine->execlists.tasklet, execlists_submission_tasklet, (unsigned long)engine); + timer_setup(&engine->execlists.timer, execlists_submission_timer, 0); logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); if (engine->class == RENDER_CLASS) { - engine->init_context = gen8_init_rcs_context; engine->emit_flush = gen8_emit_flush_render; engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; } @@ -2703,9 +2849,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) if (ret) return ret; - intel_engine_init_workarounds(engine); - intel_engine_init_whitelist(engine); - if (intel_init_workaround_bb(engine)) /* * We continue even if we fail to initialize WA batch @@ -2724,11 +2867,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) i915_mmio_reg_offset(RING_ELSP(base)); } - execlists->preempt_complete_status = ~0u; - if (engine->preempt_context) - execlists->preempt_complete_status = - upper_32_bits(engine->preempt_context->lrc_desc); - execlists->csb_status = &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; @@ -2740,7 +2878,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) else execlists->csb_size = GEN11_CSB_ENTRIES; - reset_csb_pointers(execlists); + reset_csb_pointers(engine); return 0; } @@ -2779,7 +2917,7 @@ static void execlists_init_reg_state(u32 *regs, struct intel_engine_cs *engine, struct intel_ring *ring) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm); bool rcs = engine->class == RENDER_CLASS; u32 base = engine->mmio_base; @@ -2870,8 +3008,6 @@ static void execlists_init_reg_state(u32 *regs, if (rcs) { regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); - - i915_oa_init_reg_state(engine, ce, regs); } regs[CTX_END] = MI_BATCH_BUFFER_END; @@ -2923,11 +3059,6 @@ populate_lr_context(struct intel_context *ce, if (!engine->default_state) regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); - if (ce->gem_context == engine->i915->preempt_context && - INTEL_GEN(engine->i915) < 11) - regs[CTX_CONTEXT_CONTROL + 1] |= - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); ret = 0; err_unpin_ctx: @@ -2938,12 +3069,13 @@ err_unpin_ctx: return ret; } -static struct i915_timeline *get_timeline(struct i915_gem_context *ctx) +static struct intel_timeline * +get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt) { if (ctx->timeline) - return 
i915_timeline_get(ctx->timeline); + return intel_timeline_get(ctx->timeline); else - return i915_timeline_create(ctx->i915, NULL); + return intel_timeline_create(gt, NULL); } static int execlists_context_deferred_alloc(struct intel_context *ce, @@ -2953,7 +3085,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, struct i915_vma *vma; u32 context_size; struct intel_ring *ring; - struct i915_timeline *timeline; + struct intel_timeline *timeline; int ret; if (ce->state) @@ -2971,13 +3103,13 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, if (IS_ERR(ctx_obj)) return PTR_ERR(ctx_obj); - vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto error_deref_obj; } - timeline = get_timeline(ce->gem_context); + timeline = get_timeline(ce->gem_context, engine->gt); if (IS_ERR(timeline)) { ret = PTR_ERR(timeline); goto error_deref_obj; @@ -2986,7 +3118,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, ring = intel_engine_create_ring(engine, timeline, ce->gem_context->ring_size); - i915_timeline_put(timeline); + intel_timeline_put(timeline); if (IS_ERR(ring)) { ret = PTR_ERR(ring); goto error_deref_obj; @@ -3044,6 +3176,7 @@ static void virtual_context_destroy(struct kref *kref) if (ve->context.state) __execlists_context_fini(&ve->context); + intel_context_fini(&ve->context); kfree(ve->bonds); kfree(ve); @@ -3296,11 +3429,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, return ERR_PTR(-ENOMEM); ve->base.i915 = ctx->i915; + ve->base.gt = siblings[0]->gt; ve->base.id = -1; ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; - ve->base.flags = I915_ENGINE_IS_VIRTUAL; /* * The decision on whether to submit a request using semaphores @@ -3397,8 +3530,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb; ve->base.emit_fini_breadcrumb_dw = sibling->emit_fini_breadcrumb_dw; + + ve->base.flags = sibling->flags; } + ve->base.flags |= I915_ENGINE_IS_VIRTUAL; + return &ve->context; err_put: diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 1f9db50b1869..e082b25d2db1 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -23,6 +23,7 @@ #include "i915_drv.h" #include "intel_engine.h" +#include "intel_gt.h" #include "intel_mocs.h" #include "intel_lrc.h" @@ -247,7 +248,7 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = { /** * get_mocs_settings() - * @dev_priv: i915 device. + * @gt: gt device * @table: Output table that will be made to point at appropriate * MOCS values for the device. * @@ -257,33 +258,34 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = { * * Return: true if there are applicable MOCS settings for the device. 
*/ -static bool get_mocs_settings(struct drm_i915_private *dev_priv, +static bool get_mocs_settings(struct intel_gt *gt, struct drm_i915_mocs_table *table) { + struct drm_i915_private *i915 = gt->i915; bool result = false; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { table->size = ARRAY_SIZE(icelake_mocs_table); table->table = icelake_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; result = true; - } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { + } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { table->size = ARRAY_SIZE(skylake_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = skylake_mocs_table; result = true; - } else if (IS_GEN9_LP(dev_priv)) { + } else if (IS_GEN9_LP(i915)) { table->size = ARRAY_SIZE(broxton_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = broxton_mocs_table; result = true; } else { - WARN_ONCE(INTEL_GEN(dev_priv) >= 9, + WARN_ONCE(INTEL_GEN(i915) >= 9, "Platform that should have a MOCS table does not.\n"); } /* WaDisableSkipCaching:skl,bxt,kbl,glk */ - if (IS_GEN(dev_priv, 9)) { + if (IS_GEN(i915, 9)) { int i; for (i = 0; i < table->size; i++) @@ -338,12 +340,16 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table, */ void intel_mocs_init_engine(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct intel_gt *gt = engine->gt; + struct intel_uncore *uncore = gt->uncore; struct drm_i915_mocs_table table; unsigned int index; u32 unused_value; - if (!get_mocs_settings(dev_priv, &table)) + /* Called under a blanket forcewake */ + assert_forcewakes_active(uncore, FORCEWAKE_ALL); + + if (!get_mocs_settings(gt, &table)) return; /* Set unused values to PTE */ @@ -352,12 +358,16 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) for (index = 0; index < table.size; index++) { u32 value = get_entry_control(&table, index); - I915_WRITE(mocs_register(engine->id, index), value); + intel_uncore_write_fw(uncore, + mocs_register(engine->id, index), + value); } /* All remaining entries are also unused */ for (; index < table.n_entries; index++) - I915_WRITE(mocs_register(engine->id, index), unused_value); + intel_uncore_write_fw(uncore, + mocs_register(engine->id, index), + unused_value); } /** @@ -490,7 +500,7 @@ static int emit_mocs_l3cc_table(struct i915_request *rq, /** * intel_mocs_init_l3cc_table() - program the mocs control table - * @dev_priv: i915 device private + * @gt: the intel_gt container * * This function simply programs the mocs registers for the given table * starting at the given address. This register set is programmed in pairs. @@ -502,13 +512,14 @@ static int emit_mocs_l3cc_table(struct i915_request *rq, * * Return: Nothing. 
*/ -void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) +void intel_mocs_init_l3cc_table(struct intel_gt *gt) { + struct intel_uncore *uncore = gt->uncore; struct drm_i915_mocs_table table; unsigned int i; u16 unused_value; - if (!get_mocs_settings(dev_priv, &table)) + if (!get_mocs_settings(gt, &table)) return; /* Set unused values to PTE */ @@ -518,23 +529,27 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) u16 low = get_entry_l3cc(&table, 2 * i); u16 high = get_entry_l3cc(&table, 2 * i + 1); - I915_WRITE(GEN9_LNCFCMOCS(i), - l3cc_combine(&table, low, high)); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(&table, low, high)); } /* Odd table size - 1 left over */ if (table.size & 0x01) { u16 low = get_entry_l3cc(&table, 2 * i); - I915_WRITE(GEN9_LNCFCMOCS(i), - l3cc_combine(&table, low, unused_value)); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(&table, low, unused_value)); i++; } /* All remaining entries are also unused */ for (; i < table.n_entries / 2; i++) - I915_WRITE(GEN9_LNCFCMOCS(i), - l3cc_combine(&table, unused_value, unused_value)); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(&table, unused_value, + unused_value)); } /** @@ -553,12 +568,15 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) * * Return: 0 on success, otherwise the error status. */ -int intel_rcs_context_init_mocs(struct i915_request *rq) +int intel_mocs_emit(struct i915_request *rq) { struct drm_i915_mocs_table t; int ret; - if (get_mocs_settings(rq->i915, &t)) { + if (rq->engine->class != RENDER_CLASS) + return 0; + + if (get_mocs_settings(rq->engine->gt, &t)) { /* Program the RCS control registers */ ret = emit_mocs_control_table(rq, &t); if (ret) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index 0913704a1af2..a334db2d6d6b 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -52,9 +52,11 @@ struct drm_i915_private; struct i915_request; struct intel_engine_cs; +struct intel_gt; -int intel_rcs_context_init_mocs(struct i915_request *rq); -void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); +void intel_mocs_init_l3cc_table(struct intel_gt *gt); void intel_mocs_init_engine(struct intel_engine_cs *engine); +int intel_mocs_emit(struct i915_request *rq); + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 4ee032072d4f..be37d4501c67 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -26,10 +26,9 @@ */ #include "i915_drv.h" -#include "i915_gem_render_state.h" #include "intel_renderstate.h" -struct intel_render_state { +struct intel_renderstate { const struct intel_renderstate_rodata *rodata; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -42,7 +41,7 @@ struct intel_render_state { static const struct intel_renderstate_rodata * render_state_get_rodata(const struct intel_engine_cs *engine) { - if (engine->id != RCS0) + if (engine->class != RENDER_CLASS) return NULL; switch (INTEL_GEN(engine->i915)) { @@ -75,7 +74,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine) (batch)[(i)++] = (val); \ } while(0) -static int render_state_setup(struct intel_render_state *so, +static int render_state_setup(struct intel_renderstate *so, struct drm_i915_private *i915) { const struct intel_renderstate_rodata *rodata = so->rodata; @@ -177,10 +176,10 @@ err: #undef OUT_BATCH 
-int i915_gem_render_state_emit(struct i915_request *rq) +int intel_renderstate_emit(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - struct intel_render_state so = {}; /* keep the compiler happy */ + struct intel_renderstate so = {}; /* keep the compiler happy */ int err; so.rodata = render_state_get_rodata(engine); @@ -194,7 +193,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) if (IS_ERR(so.obj)) return PTR_ERR(so.obj); - so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL); + so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(so.vma)) { err = PTR_ERR(so.vma); goto err_obj; diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h index 08f6fea05a2c..8d5079145054 100644 --- a/drivers/gpu/drm/i915/intel_renderstate.h +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h @@ -21,11 +21,13 @@ * DEALINGS IN THE SOFTWARE. */ -#ifndef _INTEL_RENDERSTATE_H -#define _INTEL_RENDERSTATE_H +#ifndef _INTEL_RENDERSTATE_H_ +#define _INTEL_RENDERSTATE_H_ #include <linux/types.h> +struct i915_request; + struct intel_renderstate_rodata { const u32 *reloc; const u32 *batch; @@ -44,4 +46,6 @@ extern const struct intel_renderstate_rodata gen7_null_state; extern const struct intel_renderstate_rodata gen8_null_state; extern const struct intel_renderstate_rodata gen9_null_state; -#endif /* INTEL_RENDERSTATE_H */ +int intel_renderstate_emit(struct i915_request *rq); + +#endif /* _INTEL_RENDERSTATE_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 4c478b38e420..98c071fe532b 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -15,26 +15,17 @@ #include "i915_gpu_error.h" #include "i915_irq.h" #include "intel_engine_pm.h" +#include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_reset.h" -#include "intel_guc.h" +#include "uc/intel_guc.h" #define RESET_MAX_RETRIES 3 /* XXX How to handle concurrent GGTT updates using tiling registers? */ #define RESET_UNDER_STOP_MACHINE 0 -static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) -{ - intel_uncore_rmw(uncore, reg, 0, set); -} - -static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) -{ - intel_uncore_rmw(uncore, reg, clr, 0); -} - static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set) { intel_uncore_rmw_fw(uncore, reg, 0, set); @@ -123,7 +114,7 @@ static void context_mark_innocent(struct i915_gem_context *ctx) atomic_inc(&ctx->active_count); } -void i915_reset_request(struct i915_request *rq, bool guilty) +void __i915_request_reset(struct i915_request *rq, bool guilty) { GEM_TRACE("%s rq=%llx:%lld, guilty? 
%s\n", rq->engine->name, @@ -144,48 +135,6 @@ void i915_reset_request(struct i915_request *rq, bool guilty) } } -static void gen3_stop_engine(struct intel_engine_cs *engine) -{ - struct intel_uncore *uncore = engine->uncore; - const u32 base = engine->mmio_base; - - GEM_TRACE("%s\n", engine->name); - - if (intel_engine_stop_cs(engine)) - GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); - - intel_uncore_write_fw(uncore, - RING_HEAD(base), - intel_uncore_read_fw(uncore, RING_TAIL(base))); - intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */ - - intel_uncore_write_fw(uncore, RING_HEAD(base), 0); - intel_uncore_write_fw(uncore, RING_TAIL(base), 0); - intel_uncore_posting_read_fw(uncore, RING_TAIL(base)); - - /* The ring must be empty before it is disabled */ - intel_uncore_write_fw(uncore, RING_CTL(base), 0); - - /* Check acts as a post */ - if (intel_uncore_read_fw(uncore, RING_HEAD(base))) - GEM_TRACE("%s: ring head [%x] not parked\n", - engine->name, - intel_uncore_read_fw(uncore, RING_HEAD(base))); -} - -static void i915_stop_engines(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - - if (INTEL_GEN(i915) < 3) - return; - - for_each_engine_masked(engine, i915, engine_mask, tmp) - gen3_stop_engine(engine); -} - static bool i915_in_reset(struct pci_dev *pdev) { u8 gdrst; @@ -194,11 +143,11 @@ static bool i915_in_reset(struct pci_dev *pdev) return gdrst & GRDOM_RESET_STATUS; } -static int i915_do_reset(struct drm_i915_private *i915, +static int i915_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = gt->i915->drm.pdev; int err; /* Assert reset for at least 20 usec, and wait for acknowledgement. 
*/ @@ -223,22 +172,22 @@ static bool g4x_reset_complete(struct pci_dev *pdev) return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_i915_private *i915, +static int g33_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = gt->i915->drm.pdev; pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); return wait_for_atomic(g4x_reset_complete(pdev), 50); } -static int g4x_do_reset(struct drm_i915_private *i915, +static int g4x_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = i915->drm.pdev; - struct intel_uncore *uncore = &i915->uncore; + struct pci_dev *pdev = gt->i915->drm.pdev; + struct intel_uncore *uncore = gt->uncore; int ret; /* WaVcpClkGateDisableForMediaReset:ctg,elk */ @@ -270,11 +219,11 @@ out: return ret; } -static int ironlake_do_reset(struct drm_i915_private *i915, +static int ironlake_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->uncore; int ret; intel_uncore_write_fw(uncore, ILK_GDSR, @@ -306,10 +255,9 @@ out: } /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ -static int gen6_hw_domain_reset(struct drm_i915_private *i915, - u32 hw_domain_mask) +static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->uncore; int err; /* @@ -331,7 +279,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *i915, return err; } -static int gen6_reset_engines(struct drm_i915_private *i915, +static int gen6_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { @@ -351,13 +299,13 @@ static int gen6_reset_engines(struct drm_i915_private *i915, intel_engine_mask_t tmp; hw_mask = 0; - for_each_engine_masked(engine, i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; } } - return gen6_hw_domain_reset(i915, hw_mask); + return gen6_hw_domain_reset(gt, hw_mask); } static u32 gen11_lock_sfc(struct intel_engine_cs *engine) @@ -455,7 +403,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine) rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); } -static int gen11_reset_engines(struct drm_i915_private *i915, +static int gen11_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { @@ -478,17 +426,17 @@ static int gen11_reset_engines(struct drm_i915_private *i915, hw_mask = GEN11_GRDOM_FULL; } else { hw_mask = 0; - for_each_engine_masked(engine, i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; hw_mask |= gen11_lock_sfc(engine); } } - ret = gen6_hw_domain_reset(i915, hw_mask); + ret = gen6_hw_domain_reset(gt, hw_mask); if (engine_mask != ALL_ENGINES) - for_each_engine_masked(engine, i915, engine_mask, tmp) + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) gen11_unlock_sfc(engine); return ret; @@ -538,7 +486,7 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } -static int gen8_reset_engines(struct drm_i915_private *i915, +static int gen8_reset_engines(struct 
intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { @@ -547,7 +495,7 @@ static int gen8_reset_engines(struct drm_i915_private *i915, intel_engine_mask_t tmp; int ret; - for_each_engine_masked(engine, i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { ret = gen8_engine_reset_prepare(engine); if (ret && !reset_non_ready) goto skip_reset; @@ -563,23 +511,23 @@ static int gen8_reset_engines(struct drm_i915_private *i915, * We rather take context corruption instead of * failed reset with a wedged driver/gpu. And * active bb execution case should be covered by - * i915_stop_engines we have before the reset. + * stop_engines() we have before the reset. */ } - if (INTEL_GEN(i915) >= 11) - ret = gen11_reset_engines(i915, engine_mask, retry); + if (INTEL_GEN(gt->i915) >= 11) + ret = gen11_reset_engines(gt, engine_mask, retry); else - ret = gen6_reset_engines(i915, engine_mask, retry); + ret = gen6_reset_engines(gt, engine_mask, retry); skip_reset: - for_each_engine_masked(engine, i915, engine_mask, tmp) + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) gen8_engine_reset_cancel(engine); return ret; } -typedef int (*reset_func)(struct drm_i915_private *, +typedef int (*reset_func)(struct intel_gt *, intel_engine_mask_t engine_mask, unsigned int retry); @@ -601,15 +549,14 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915) return NULL; } -int intel_gpu_reset(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask) +int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) { const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1; reset_func reset; int ret = -ETIMEDOUT; int retry; - reset = intel_get_gpu_reset(i915); + reset = intel_get_gpu_reset(gt->i915); if (!reset) return -ENODEV; @@ -617,31 +564,14 @@ int intel_gpu_reset(struct drm_i915_private *i915, * If the power well sleeps during the reset, the reset * request may be dropped and never completes (causing -EIO). */ - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { - /* - * We stop engines, otherwise we might get failed reset and a - * dead gpu (on elk). Also as modern gpu as kbl can suffer - * from system hang if batchbuffer is progressing when - * the reset is issued, regardless of READY_TO_RESET ack. - * Thus assume it is best to stop engines on all gens - * where we have a gpu reset. - * - * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) - * - * WaMediaResetMainRingCleanup:ctg,elk (presumably) - * - * FIXME: Wa for more modern gens needs to be validated - */ - if (retry) - i915_stop_engines(i915, engine_mask); - GEM_TRACE("engine_mask=%x\n", engine_mask); preempt_disable(); - ret = reset(i915, engine_mask, retry); + ret = reset(gt, engine_mask, retry); preempt_enable(); } - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); return ret; } @@ -659,17 +589,17 @@ bool intel_has_reset_engine(struct drm_i915_private *i915) return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2; } -int intel_reset_guc(struct drm_i915_private *i915) +int intel_reset_guc(struct intel_gt *gt) { u32 guc_domain = - INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; + INTEL_GEN(gt->i915) >= 11 ? 
GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; int ret; - GEM_BUG_ON(!HAS_GUC(i915)); + GEM_BUG_ON(!HAS_GT_UC(gt->i915)); - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - ret = gen6_hw_domain_reset(i915, guc_domain); - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + ret = gen6_hw_domain_reset(gt, guc_domain); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); return ret; } @@ -687,56 +617,59 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) * written to the powercontext is undefined and so we may lose * GPU state upon resume, i.e. fail to restart after a reset. */ - intel_engine_pm_get(engine); intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); engine->reset.prepare(engine); } -static void revoke_mmaps(struct drm_i915_private *i915) +static void revoke_mmaps(struct intel_gt *gt) { int i; - for (i = 0; i < i915->ggtt.num_fences; i++) { + for (i = 0; i < gt->ggtt->num_fences; i++) { struct drm_vma_offset_node *node; struct i915_vma *vma; u64 vma_offset; - vma = READ_ONCE(i915->ggtt.fence_regs[i].vma); + vma = READ_ONCE(gt->ggtt->fence_regs[i].vma); if (!vma) continue; if (!i915_vma_has_userfault(vma)) continue; - GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]); + GEM_BUG_ON(vma->fence != >->ggtt->fence_regs[i]); node = &vma->obj->base.vma_node; vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; - unmap_mapping_range(i915->drm.anon_inode->i_mapping, + unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, drm_vma_node_offset_addr(node) + vma_offset, vma->size, 1); } } -static void reset_prepare(struct drm_i915_private *i915) +static intel_engine_mask_t reset_prepare(struct intel_gt *gt) { struct intel_engine_cs *engine; + intel_engine_mask_t awake = 0; enum intel_engine_id id; - intel_gt_pm_get(i915); - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) { + if (intel_engine_pm_get_if_awake(engine)) + awake |= engine->mask; reset_prepare_engine(engine); + } + + intel_uc_reset_prepare(>->uc); - intel_uc_reset_prepare(i915); + return awake; } -static void gt_revoke(struct drm_i915_private *i915) +static void gt_revoke(struct intel_gt *gt) { - revoke_mmaps(i915); + revoke_mmaps(gt); } -static int gt_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask) +static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -746,14 +679,14 @@ static int gt_reset(struct drm_i915_private *i915, * Everything depends on having the GTT running, so we need to start * there. 
*/ - err = i915_ggtt_enable_hw(i915); + err = i915_ggtt_enable_hw(gt->i915); if (err) return err; - for_each_engine(engine, i915, id) - intel_engine_reset(engine, stalled_mask & engine->mask); + for_each_engine(engine, gt->i915, id) + __intel_engine_reset(engine, stalled_mask & engine->mask); - i915_gem_restore_fences(i915); + i915_gem_restore_fences(gt->i915); return err; } @@ -761,20 +694,21 @@ static int gt_reset(struct drm_i915_private *i915, static void reset_finish_engine(struct intel_engine_cs *engine) { engine->reset.finish(engine); - intel_engine_pm_put(engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); + + intel_engine_signal_breadcrumbs(engine); } -static void reset_finish(struct drm_i915_private *i915) +static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { reset_finish_engine(engine); - intel_engine_signal_breadcrumbs(engine); + if (awake & engine->mask) + intel_engine_pm_put(engine); } - intel_gt_pm_put(i915); } static void nop_submit_request(struct i915_request *request) @@ -794,19 +728,19 @@ static void nop_submit_request(struct i915_request *request) intel_engine_queue_breadcrumbs(engine); } -static void __i915_gem_set_wedged(struct drm_i915_private *i915) +static void __intel_gt_set_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; + intel_engine_mask_t awake; enum intel_engine_id id; - if (test_bit(I915_WEDGED, &error->flags)) + if (test_bit(I915_WEDGED, >->reset.flags)) return; - if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { + if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) { struct drm_printer p = drm_debug_printer(__func__); - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) intel_engine_dump(engine, &p, "%s\n", engine->name); } @@ -817,17 +751,17 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) * rolling the global seqno forward (since this would complete requests * for which we haven't set the fence error to EIO yet). */ - reset_prepare(i915); + awake = reset_prepare(gt); /* Even if the GPU reset fails, it should still stop the engines */ - if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) - intel_gpu_reset(i915, ALL_ENGINES); + if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + __intel_gt_reset(gt, ALL_ENGINES); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { engine->submit_request = nop_submit_request; engine->schedule = NULL; } - i915->caps.scheduler = 0; + gt->i915->caps.scheduler = 0; /* * Make sure no request can slip through without getting completed by @@ -835,37 +769,36 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) * in nop_submit_request. 
*/ synchronize_rcu_expedited(); - set_bit(I915_WEDGED, &error->flags); + set_bit(I915_WEDGED, &gt->reset.flags); /* Mark all executing requests as skipped */ - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) engine->cancel_requests(engine); - reset_finish(i915); + reset_finish(gt, awake); GEM_TRACE("end\n"); } -void i915_gem_set_wedged(struct drm_i915_private *i915) +void intel_gt_set_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; intel_wakeref_t wakeref; - mutex_lock(&error->wedge_mutex); - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - __i915_gem_set_wedged(i915); - mutex_unlock(&error->wedge_mutex); + mutex_lock(&gt->reset.mutex); + with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) + __intel_gt_set_wedged(gt); + mutex_unlock(&gt->reset.mutex); } -static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) +static bool __intel_gt_unset_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; - struct i915_timeline *tl; + struct intel_gt_timelines *timelines = &gt->timelines; + struct intel_timeline *tl; - if (!test_bit(I915_WEDGED, &error->flags)) + if (!test_bit(I915_WEDGED, &gt->reset.flags)) return true; - if (!i915->gt.scratch) /* Never full initialised, recovery impossible */ + if (!gt->scratch) /* Never full initialised, recovery impossible */ return false; GEM_TRACE("start\n"); @@ -880,8 +813,8 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) * * No more can be submitted until we reset the wedged bit. */ - mutex_lock(&i915->gt.timelines.mutex); - list_for_each_entry(tl, &i915->gt.timelines.active_list, link) { + mutex_lock(&timelines->mutex); + list_for_each_entry(tl, &timelines->active_list, link) { struct i915_request *rq; rq = i915_active_request_get_unlocked(&tl->last_request); @@ -898,9 +831,9 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT); i915_request_put(rq); } - mutex_unlock(&i915->gt.timelines.mutex); + mutex_unlock(&timelines->mutex); - intel_gt_sanitize(i915, false); + intel_gt_sanitize(gt, false); /* * Undo nop_submit_request. We prevent all new i915 requests from @@ -911,49 +844,62 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) * the nop_submit_request on reset, we can do this from normal * context and do not require stop_machine(). 
*/ - intel_engines_reset_default_submission(i915); + intel_engines_reset_default_submission(gt); GEM_TRACE("end\n"); smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ - clear_bit(I915_WEDGED, &i915->gpu_error.flags); + clear_bit(I915_WEDGED, >->reset.flags); return true; } -bool i915_gem_unset_wedged(struct drm_i915_private *i915) +bool intel_gt_unset_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; bool result; - mutex_lock(&error->wedge_mutex); - result = __i915_gem_unset_wedged(i915); - mutex_unlock(&error->wedge_mutex); + mutex_lock(>->reset.mutex); + result = __intel_gt_unset_wedged(gt); + mutex_unlock(>->reset.mutex); return result; } -static int do_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask) +static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) { int err, i; - gt_revoke(i915); + gt_revoke(gt); - err = intel_gpu_reset(i915, ALL_ENGINES); + err = __intel_gt_reset(gt, ALL_ENGINES); for (i = 0; err && i < RESET_MAX_RETRIES; i++) { msleep(10 * (i + 1)); - err = intel_gpu_reset(i915, ALL_ENGINES); + err = __intel_gt_reset(gt, ALL_ENGINES); } if (err) return err; - return gt_reset(i915, stalled_mask); + return gt_reset(gt, stalled_mask); +} + +static int resume(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int ret; + + for_each_engine(engine, gt->i915, id) { + ret = engine->resume(engine); + if (ret) + return ret; + } + + return 0; } /** - * i915_reset - reset chip after a hang - * @i915: #drm_i915_private to reset + * intel_gt_reset - reset chip after a hang + * @gt: #intel_gt to reset * @stalled_mask: mask of the stalled engines with the guilty requests * @reason: user error message for why we are resetting * @@ -968,49 +914,50 @@ static int do_reset(struct drm_i915_private *i915, * - re-init interrupt state * - re-init display */ -void i915_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask, - const char *reason) +void intel_gt_reset(struct intel_gt *gt, + intel_engine_mask_t stalled_mask, + const char *reason) { - struct i915_gpu_error *error = &i915->gpu_error; + intel_engine_mask_t awake; int ret; - GEM_TRACE("flags=%lx\n", error->flags); + GEM_TRACE("flags=%lx\n", gt->reset.flags); might_sleep(); - GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); - mutex_lock(&error->wedge_mutex); + GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, >->reset.flags)); + mutex_lock(>->reset.mutex); /* Clear any previous failed attempts at recovery. Time to try again. 
*/ - if (!__i915_gem_unset_wedged(i915)) + if (!__intel_gt_unset_wedged(gt)) goto unlock; if (reason) - dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); - error->reset_count++; + dev_notice(gt->i915->drm.dev, + "Resetting chip for %s\n", reason); + atomic_inc(>->i915->gpu_error.reset_count); - reset_prepare(i915); + awake = reset_prepare(gt); - if (!intel_has_gpu_reset(i915)) { + if (!intel_has_gpu_reset(gt->i915)) { if (i915_modparams.reset) - dev_err(i915->drm.dev, "GPU reset not supported\n"); + dev_err(gt->i915->drm.dev, "GPU reset not supported\n"); else DRM_DEBUG_DRIVER("GPU reset disabled\n"); goto error; } - if (INTEL_INFO(i915)->gpu_reset_clobbers_display) - intel_runtime_pm_disable_interrupts(i915); + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + intel_runtime_pm_disable_interrupts(gt->i915); - if (do_reset(i915, stalled_mask)) { - dev_err(i915->drm.dev, "Failed to reset chip\n"); + if (do_reset(gt, stalled_mask)) { + dev_err(gt->i915->drm.dev, "Failed to reset chip\n"); goto taint; } - if (INTEL_INFO(i915)->gpu_reset_clobbers_display) - intel_runtime_pm_enable_interrupts(i915); + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + intel_runtime_pm_enable_interrupts(gt->i915); - intel_overlay_reset(i915); + intel_overlay_reset(gt->i915); /* * Next we need to restore the context, but we don't use those @@ -1020,19 +967,23 @@ void i915_reset(struct drm_i915_private *i915, * was running at the time of the reset (i.e. we weren't VT * switched away). */ - ret = i915_gem_init_hw(i915); + ret = i915_gem_init_hw(gt->i915); if (ret) { DRM_ERROR("Failed to initialise HW following reset (%d)\n", ret); - goto error; + goto taint; } - i915_queue_hangcheck(i915); + ret = resume(gt); + if (ret) + goto taint; + + intel_gt_queue_hangcheck(gt); finish: - reset_finish(i915); + reset_finish(gt, awake); unlock: - mutex_unlock(&error->wedge_mutex); + mutex_unlock(>->reset.mutex); return; taint: @@ -1050,18 +1001,17 @@ taint: */ add_taint_for_CI(TAINT_WARN); error: - __i915_gem_set_wedged(i915); + __intel_gt_set_wedged(gt); goto finish; } -static inline int intel_gt_reset_engine(struct drm_i915_private *i915, - struct intel_engine_cs *engine) +static inline int intel_gt_reset_engine(struct intel_engine_cs *engine) { - return intel_gpu_reset(i915, engine->mask); + return __intel_gt_reset(engine->gt, engine->mask); } /** - * i915_reset_engine - reset GPU engine to recover from a hang + * intel_engine_reset - reset GPU engine to recover from a hang * @engine: engine to reset * @msg: reason for GPU reset; or NULL for no dev_notice() * @@ -1073,15 +1023,15 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *i915, * - reset engine (which will force the engine to idle) * - re-init/configure engine */ -int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) +int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) { - struct i915_gpu_error *error = &engine->i915->gpu_error; + struct intel_gt *gt = engine->gt; int ret; - GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); - GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); + GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags); + GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, >->reset.flags)); - if (!intel_wakeref_active(&engine->wakeref)) + if (!intel_engine_pm_get_if_awake(engine)) return 0; reset_prepare_engine(engine); @@ -1089,16 +1039,16 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) if (msg) 
dev_notice(engine->i915->drm.dev, "Resetting %s for %s\n", engine->name, msg); - error->reset_engine_count[engine->id]++; + atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); - if (!engine->i915->guc.execbuf_client) - ret = intel_gt_reset_engine(engine->i915, engine); + if (!engine->gt->uc.guc.execbuf_client) + ret = intel_gt_reset_engine(engine); else - ret = intel_guc_reset_engine(&engine->i915->guc, engine); + ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); if (ret) { /* If we fail here, we expect to fallback to a global reset */ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", - engine->i915->guc.execbuf_client ? "GuC " : "", + engine->gt->uc.guc.execbuf_client ? "GuC " : "", engine->name, ret); goto out; } @@ -1108,7 +1058,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) * active request and can drop it, adjust head to skip the offending * request to resume executing remaining requests in the queue. */ - intel_engine_reset(engine, true); + __intel_engine_reset(engine, true); /* * The engine and its registers (and workarounds in case of render) @@ -1116,25 +1066,23 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) * process to program RING_MODE, HWSP and re-enable submission. */ ret = engine->resume(engine); - if (ret) - goto out; out: intel_engine_cancel_stop_cs(engine); reset_finish_engine(engine); + intel_engine_pm_put(engine); return ret; } -static void i915_reset_device(struct drm_i915_private *i915, - u32 engine_mask, - const char *reason) +static void intel_gt_reset_global(struct intel_gt *gt, + u32 engine_mask, + const char *reason) { - struct i915_gpu_error *error = &i915->gpu_error; - struct kobject *kobj = &i915->drm.primary->kdev->kobj; + struct kobject *kobj = >->i915->drm.primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; - struct i915_wedge_me w; + struct intel_wedge_me w; kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); @@ -1142,137 +1090,24 @@ static void i915_reset_device(struct drm_i915_private *i915, kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); /* Use a watchdog to ensure that our reset completes */ - i915_wedge_on_timeout(&w, i915, 5 * HZ) { - intel_prepare_reset(i915); + intel_wedge_on_timeout(&w, gt, 5 * HZ) { + intel_prepare_reset(gt->i915); /* Flush everyone using a resource about to be clobbered */ - synchronize_srcu_expedited(&error->reset_backoff_srcu); + synchronize_srcu_expedited(>->reset.backoff_srcu); - i915_reset(i915, engine_mask, reason); + intel_gt_reset(gt, engine_mask, reason); - intel_finish_reset(i915); + intel_finish_reset(gt->i915); } - if (!test_bit(I915_WEDGED, &error->flags)) + if (!test_bit(I915_WEDGED, >->reset.flags)) kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } -static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) -{ - intel_uncore_rmw(uncore, reg, 0, 0); -} - -static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) -{ - GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); - GEN6_RING_FAULT_REG_POSTING_READ(engine); -} - -static void clear_error_registers(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask) -{ - struct intel_uncore *uncore = &i915->uncore; - u32 eir; - - if (!IS_GEN(i915, 2)) - clear_register(uncore, PGTBL_ER); - - if (INTEL_GEN(i915) < 4) - clear_register(uncore, IPEIR(RENDER_RING_BASE)); - else - 
clear_register(uncore, IPEIR_I965); - - clear_register(uncore, EIR); - eir = intel_uncore_read(uncore, EIR); - if (eir) { - /* - * some errors might have become stuck, - * mask them. - */ - DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); - rmw_set(uncore, EMR, eir); - intel_uncore_write(uncore, GEN2_IIR, - I915_MASTER_ERROR_INTERRUPT); - } - - if (INTEL_GEN(i915) >= 8) { - rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); - intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); - } else if (INTEL_GEN(i915) >= 6) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine_masked(engine, i915, engine_mask, id) - gen8_clear_engine_error_register(engine); - } -} - -static void gen6_check_faults(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 fault; - - for_each_engine(engine, dev_priv, id) { - fault = GEN6_RING_FAULT_REG_READ(engine); - if (fault & RING_FAULT_VALID) { - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08lx\n" - "\tAddress space: %s\n" - "\tSource ID: %d\n" - "\tType: %d\n", - fault & PAGE_MASK, - fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); - } - } -} - -static void gen8_check_faults(struct drm_i915_private *dev_priv) -{ - u32 fault = I915_READ(GEN8_RING_FAULT_REG); - - if (fault & RING_FAULT_VALID) { - u32 fault_data0, fault_data1; - u64 fault_addr; - - fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); - fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); - fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | - ((u64)fault_data0 << 12); - - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08x_%08x\n" - "\tAddress space: %s\n" - "\tEngine ID: %d\n" - "\tSource ID: %d\n" - "\tType: %d\n", - upper_32_bits(fault_addr), - lower_32_bits(fault_addr), - fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", - GEN8_RING_FAULT_ENGINE_ID(fault), - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); - } -} - -void i915_check_and_clear_faults(struct drm_i915_private *i915) -{ - /* From GEN8 onwards we only have one 'All Engine Fault Register' */ - if (INTEL_GEN(i915) >= 8) - gen8_check_faults(i915); - else if (INTEL_GEN(i915) >= 6) - gen6_check_faults(i915); - else - return; - - clear_error_registers(i915, ALL_ENGINES); -} - /** - * i915_handle_error - handle a gpu error - * @i915: i915 device private + * intel_gt_handle_error - handle a gpu error + * @gt: the intel_gt * @engine_mask: mask representing engines that are hung * @flags: control flags * @fmt: Error message format string @@ -1283,12 +1118,11 @@ void i915_check_and_clear_faults(struct drm_i915_private *i915) * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -void i915_handle_error(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask, - unsigned long flags, - const char *fmt, ...) +void intel_gt_handle_error(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...) { - struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; intel_wakeref_t wakeref; intel_engine_mask_t tmp; @@ -1312,33 +1146,31 @@ void i915_handle_error(struct drm_i915_private *i915, * isn't the case at least when we get here by doing a * simulated reset via debugfs, so get an RPM reference. 
*/ - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_runtime_pm_get(>->i915->runtime_pm); - engine_mask &= INTEL_INFO(i915)->engine_mask; + engine_mask &= INTEL_INFO(gt->i915)->engine_mask; if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(i915, engine_mask, msg); - clear_error_registers(i915, engine_mask); + i915_capture_error_state(gt->i915, engine_mask, msg); + intel_gt_clear_error_registers(gt, engine_mask); } /* * Try engine reset when available. We fall back to full reset if * single reset fails. */ - if (intel_has_reset_engine(i915) && !__i915_wedged(error)) { - for_each_engine_masked(engine, i915, engine_mask, tmp) { + if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); if (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &error->flags)) + >->reset.flags)) continue; - if (i915_reset_engine(engine, msg) == 0) + if (intel_engine_reset(engine, msg) == 0) engine_mask &= ~engine->mask; - clear_bit(I915_RESET_ENGINE + engine->id, - &error->flags); - wake_up_bit(&error->flags, - I915_RESET_ENGINE + engine->id); + clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags); } } @@ -1346,9 +1178,9 @@ void i915_handle_error(struct drm_i915_private *i915, goto out; /* Full reset needs the mutex, stop any other user trying to do so. */ - if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) { - wait_event(error->reset_queue, - !test_bit(I915_RESET_BACKOFF, &error->flags)); + if (test_and_set_bit(I915_RESET_BACKOFF, >->reset.flags)) { + wait_event(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, >->reset.flags)); goto out; /* piggy-back on the other reset */ } @@ -1356,113 +1188,119 @@ void i915_handle_error(struct drm_i915_private *i915, synchronize_rcu_expedited(); /* Prevent any other reset-engine attempt. 
*/ - for_each_engine(engine, i915, tmp) { + for_each_engine(engine, gt->i915, tmp) { while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &error->flags)) - wait_on_bit(&error->flags, + &gt->reset.flags)) + wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + engine->id, TASK_UNINTERRUPTIBLE); } - i915_reset_device(i915, engine_mask, msg); - - for_each_engine(engine, i915, tmp) { - clear_bit(I915_RESET_ENGINE + engine->id, - &error->flags); - } + intel_gt_reset_global(gt, engine_mask, msg); - clear_bit(I915_RESET_BACKOFF, &error->flags); - wake_up_all(&error->reset_queue); + for_each_engine(engine, gt->i915, tmp) + clear_bit_unlock(I915_RESET_ENGINE + engine->id, + &gt->reset.flags); + clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags); + smp_mb__after_atomic(); + wake_up_all(&gt->reset.queue); out: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref); } -int i915_reset_trylock(struct drm_i915_private *i915) +int intel_gt_reset_trylock(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; int srcu; - might_lock(&error->reset_backoff_srcu); + might_lock(&gt->reset.backoff_srcu); might_sleep(); rcu_read_lock(); - while (test_bit(I915_RESET_BACKOFF, &error->flags)) { + while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { rcu_read_unlock(); - if (wait_event_interruptible(error->reset_queue, + if (wait_event_interruptible(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, - &error->flags))) + &gt->reset.flags))) return -EINTR; rcu_read_lock(); } - srcu = srcu_read_lock(&error->reset_backoff_srcu); + srcu = srcu_read_lock(&gt->reset.backoff_srcu); rcu_read_unlock(); return srcu; } -void i915_reset_unlock(struct drm_i915_private *i915, int tag) -__releases(&i915->gpu_error.reset_backoff_srcu) +void intel_gt_reset_unlock(struct intel_gt *gt, int tag) +__releases(&gt->reset.backoff_srcu) { - struct i915_gpu_error *error = &i915->gpu_error; - - srcu_read_unlock(&error->reset_backoff_srcu, tag); + srcu_read_unlock(&gt->reset.backoff_srcu, tag); } -int i915_terminally_wedged(struct drm_i915_private *i915) +int intel_gt_terminally_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; - might_sleep(); - if (!__i915_wedged(error)) + if (!intel_gt_is_wedged(gt)) return 0; /* Reset still in progress? Maybe we will recover? */ - if (!test_bit(I915_RESET_BACKOFF, &error->flags)) + if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) return -EIO; /* XXX intel_reset_finish() still takes struct_mutex!!! */ - if (mutex_is_locked(&i915->drm.struct_mutex)) + if (mutex_is_locked(&gt->i915->drm.struct_mutex)) return -EAGAIN; - if (wait_event_interruptible(error->reset_queue, + if (wait_event_interruptible(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, - &error->flags))) + &gt->reset.flags))) return -EINTR; - return __i915_wedged(error) ? -EIO : 0; + return intel_gt_is_wedged(gt) ? 
-EIO : 0; +} + +void intel_gt_init_reset(struct intel_gt *gt) +{ + init_waitqueue_head(>->reset.queue); + mutex_init(>->reset.mutex); + init_srcu_struct(>->reset.backoff_srcu); +} + +void intel_gt_fini_reset(struct intel_gt *gt) +{ + cleanup_srcu_struct(>->reset.backoff_srcu); } -static void i915_wedge_me(struct work_struct *work) +static void intel_wedge_me(struct work_struct *work) { - struct i915_wedge_me *w = container_of(work, typeof(*w), work.work); + struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); - dev_err(w->i915->drm.dev, + dev_err(w->gt->i915->drm.dev, "%s timed out, cancelling all in-flight rendering.\n", w->name); - i915_gem_set_wedged(w->i915); + intel_gt_set_wedged(w->gt); } -void __i915_init_wedge(struct i915_wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name) +void __intel_init_wedge(struct intel_wedge_me *w, + struct intel_gt *gt, + long timeout, + const char *name) { - w->i915 = i915; + w->gt = gt; w->name = name; - INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me); + INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me); schedule_delayed_work(&w->work, timeout); } -void __i915_fini_wedge(struct i915_wedge_me *w) +void __intel_fini_wedge(struct intel_wedge_me *w) { cancel_delayed_work_sync(&w->work); destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; + w->gt = NULL; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 580ebdb59eca..37a987b17108 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -11,58 +11,67 @@ #include <linux/types.h> #include <linux/srcu.h> -#include "gt/intel_engine_types.h" +#include "intel_engine_types.h" +#include "intel_reset_types.h" struct drm_i915_private; struct i915_request; struct intel_engine_cs; +struct intel_gt; struct intel_guc; +void intel_gt_init_reset(struct intel_gt *gt); +void intel_gt_fini_reset(struct intel_gt *gt); + __printf(4, 5) -void i915_handle_error(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask, - unsigned long flags, - const char *fmt, ...); +void intel_gt_handle_error(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...); #define I915_ERROR_CAPTURE BIT(0) -void i915_check_and_clear_faults(struct drm_i915_private *i915); - -void i915_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask, - const char *reason); -int i915_reset_engine(struct intel_engine_cs *engine, - const char *reason); - -void i915_reset_request(struct i915_request *rq, bool guilty); +void intel_gt_reset(struct intel_gt *gt, + intel_engine_mask_t stalled_mask, + const char *reason); +int intel_engine_reset(struct intel_engine_cs *engine, + const char *reason); -int __must_check i915_reset_trylock(struct drm_i915_private *i915); -void i915_reset_unlock(struct drm_i915_private *i915, int tag); +void __i915_request_reset(struct i915_request *rq, bool guilty); -int i915_terminally_wedged(struct drm_i915_private *i915); +int __must_check intel_gt_reset_trylock(struct intel_gt *gt); +void intel_gt_reset_unlock(struct intel_gt *gt, int tag); -bool intel_has_gpu_reset(struct drm_i915_private *i915); -bool intel_has_reset_engine(struct drm_i915_private *i915); +void intel_gt_set_wedged(struct intel_gt *gt); +bool intel_gt_unset_wedged(struct intel_gt *gt); +int intel_gt_terminally_wedged(struct intel_gt *gt); -int intel_gpu_reset(struct drm_i915_private *i915, - intel_engine_mask_t 
engine_mask); +int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask); -int intel_reset_guc(struct drm_i915_private *i915); +int intel_reset_guc(struct intel_gt *gt); -struct i915_wedge_me { +struct intel_wedge_me { struct delayed_work work; - struct drm_i915_private *i915; + struct intel_gt *gt; const char *name; }; -void __i915_init_wedge(struct i915_wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name); -void __i915_fini_wedge(struct i915_wedge_me *w); +void __intel_init_wedge(struct intel_wedge_me *w, + struct intel_gt *gt, + long timeout, + const char *name); +void __intel_fini_wedge(struct intel_wedge_me *w); -#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \ - (W)->i915; \ - __i915_fini_wedge((W))) +#define intel_wedge_on_timeout(W, GT, TIMEOUT) \ + for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \ + (W)->gt; \ + __intel_fini_wedge((W))) + +static inline bool __intel_reset_failed(const struct intel_reset *reset) +{ + return unlikely(test_bit(I915_WEDGED, &reset->flags)); +} + +bool intel_has_gpu_reset(struct drm_i915_private *i915); +bool intel_has_reset_engine(struct drm_i915_private *i915); #endif /* I915_RESET_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h new file mode 100644 index 000000000000..31968356e0c0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_RESET_TYPES_H_ +#define __INTEL_RESET_TYPES_H_ + +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/srcu.h> + +struct intel_reset { + /** + * flags: Control various stages of the GPU reset + * + * #I915_RESET_BACKOFF - When we start a global reset, we need to + * serialise with any other users attempting to do the same, and + * any global resources that may be clobber by the reset (such as + * FENCE registers). + * + * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to + * acquire the struct_mutex to reset an engine, we need an explicit + * flag to prevent two concurrent reset attempts in the same engine. + * As the number of engines continues to grow, allocate the flags from + * the most significant bits. + * + * #I915_WEDGED - If reset fails and we can no longer use the GPU, + * we set the #I915_WEDGED bit. Prior to command submission, e.g. + * i915_request_alloc(), this bit is checked and the sequence + * aborted (with -EIO reported to userspace) if set. + */ + unsigned long flags; +#define I915_RESET_BACKOFF 0 +#define I915_RESET_MODESET 1 +#define I915_RESET_ENGINE 2 +#define I915_WEDGED (BITS_PER_LONG - 1) + + struct mutex mutex; /* serialises wedging/unwedging */ + + /** + * Waitqueue to signal when the reset has completed. Used by clients + * that wait for dev_priv->mm.wedged to settle. 
+ */ + wait_queue_head_t queue; + + struct srcu_struct backoff_srcu; +}; + +#endif /* _INTEL_RESET_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c6023bc9452d..8d24a49e5139 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -34,9 +34,9 @@ #include "gem/i915_gem_context.h" #include "i915_drv.h" -#include "i915_gem_render_state.h" #include "i915_trace.h" #include "intel_context.h" +#include "intel_gt.h" #include "intel_reset.h" #include "intel_workarounds.h" @@ -75,7 +75,8 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode) *cs++ = cmd; while (num_store_dw--) { *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *cs++ = i915_scratch_offset(rq->i915); + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); *cs++ = 0; } *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; @@ -148,7 +149,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) */ if (mode & EMIT_INVALIDATE) { *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; - *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; *cs++ = 0; @@ -156,7 +159,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) *cs++ = MI_FLUSH; *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; - *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; *cs++ = 0; } @@ -208,7 +213,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) static int gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) { - u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); u32 *cs; cs = intel_ring_begin(rq, 6); @@ -241,7 +248,9 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) static int gen6_render_ring_flush(struct i915_request *rq, u32 mode) { - u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); u32 *cs, flags = 0; int ret; @@ -299,7 +308,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = GFX_OP_PIPE_CONTROL(4); *cs++ = PIPE_CONTROL_QW_WRITE; - *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; /* Finally we can flush and with it emit the breadcrumb */ @@ -342,7 +353,9 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq) static int gen7_render_ring_flush(struct i915_request *rq, u32 mode) { - u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); u32 *cs, flags = 0; /* @@ -725,7 +738,45 @@ out: static void reset_prepare(struct intel_engine_cs *engine) { - intel_engine_stop_cs(engine); + struct intel_uncore *uncore = engine->uncore; + const u32 base = engine->mmio_base; + + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). 
Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. + * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * WaMediaResetMainRingCleanup:ctg,elk (presumably) + * + * FIXME: Wa for more modern gens needs to be validated + */ + GEM_TRACE("%s\n", engine->name); + + if (intel_engine_stop_cs(engine)) + GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); + + intel_uncore_write_fw(uncore, + RING_HEAD(base), + intel_uncore_read_fw(uncore, RING_TAIL(base))); + intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */ + + intel_uncore_write_fw(uncore, RING_HEAD(base), 0); + intel_uncore_write_fw(uncore, RING_TAIL(base), 0); + intel_uncore_posting_read_fw(uncore, RING_TAIL(base)); + + /* The ring must be empty before it is disabled */ + intel_uncore_write_fw(uncore, RING_CTL(base), 0); + + /* Check acts as a post */ + if (intel_uncore_read_fw(uncore, RING_HEAD(base))) + GEM_TRACE("%s: ring head [%x] not parked\n", + engine->name, + intel_uncore_read_fw(uncore, RING_HEAD(base))); } static void reset_ring(struct intel_engine_cs *engine, bool stalled) @@ -781,7 +832,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled) * If the request was innocent, we try to replay the request * with the restored context. */ - i915_reset_request(rq, stalled); + __i915_request_reset(rq, stalled); GEM_BUG_ON(rq->ring != engine->buffer); head = rq->head; @@ -797,21 +848,6 @@ static void reset_finish(struct intel_engine_cs *engine) { } -static int intel_rcs_ctx_init(struct i915_request *rq) -{ - int ret; - - ret = intel_engine_emit_ctx_wa(rq); - if (ret != 0) - return ret; - - ret = i915_gem_render_state_emit(rq); - if (ret) - return ret; - - return 0; -} - static int rcs_resume(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -1033,14 +1069,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine) /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ ENGINE_POSTING_READ(engine, RING_IMR); - gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask); + gen6_unmask_pm_irq(engine->gt, engine->irq_enable_mask); } static void hsw_vebox_irq_disable(struct intel_engine_cs *engine) { ENGINE_WRITE(engine, RING_IMR, ~0); - gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask); + gen6_mask_pm_irq(engine->gt, engine->irq_enable_mask); } static int @@ -1071,9 +1107,11 @@ i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) { - u32 *cs, cs_offset = i915_scratch_offset(rq->i915); + u32 *cs, cs_offset = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); - GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE); + GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE); cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) @@ -1149,16 +1187,16 @@ i915_emit_bb_start(struct i915_request *rq, int intel_ring_pin(struct intel_ring *ring) { struct i915_vma *vma = ring->vma; - enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); unsigned int flags; void *addr; int ret; - GEM_BUG_ON(ring->vaddr); + if (atomic_fetch_inc(&ring->pin_count)) + return 0; - ret = i915_timeline_pin(ring->timeline); + ret = intel_timeline_pin(ring->timeline); if (ret) - return ret; + goto err_unpin; flags = PIN_GLOBAL; @@ -1172,26 +1210,32 @@ int intel_ring_pin(struct intel_ring *ring) ret = i915_vma_pin(vma, 0, 0, flags); 
if (unlikely(ret)) - goto unpin_timeline; + goto err_timeline; if (i915_vma_is_map_and_fenceable(vma)) addr = (void __force *)i915_vma_pin_iomap(vma); else - addr = i915_gem_object_pin_map(vma->obj, map); + addr = i915_gem_object_pin_map(vma->obj, + i915_coherent_map_type(vma->vm->i915)); if (IS_ERR(addr)) { ret = PTR_ERR(addr); - goto unpin_ring; + goto err_ring; } vma->obj->pin_global++; + GEM_BUG_ON(ring->vaddr); ring->vaddr = addr; + + GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context); return 0; -unpin_ring: +err_ring: i915_vma_unpin(vma); -unpin_timeline: - i915_timeline_unpin(ring->timeline); +err_timeline: + intel_timeline_unpin(ring->timeline); +err_unpin: + atomic_dec(&ring->pin_count); return ret; } @@ -1207,34 +1251,40 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail) void intel_ring_unpin(struct intel_ring *ring) { - GEM_BUG_ON(!ring->vma); - GEM_BUG_ON(!ring->vaddr); + if (!atomic_dec_and_test(&ring->pin_count)) + return; + + GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context); /* Discard any unused bytes beyond that submitted to hw. */ intel_ring_reset(ring, ring->tail); + GEM_BUG_ON(!ring->vma); + i915_vma_unset_ggtt_write(ring->vma); if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); else i915_gem_object_unpin_map(ring->vma->obj); + + GEM_BUG_ON(!ring->vaddr); ring->vaddr = NULL; ring->vma->obj->pin_global--; i915_vma_unpin(ring->vma); - i915_timeline_unpin(ring->timeline); + intel_timeline_unpin(ring->timeline); } -static struct i915_vma * -intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) { - struct i915_address_space *vm = &dev_priv->ggtt.vm; + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; struct drm_i915_gem_object *obj; struct i915_vma *vma; - obj = i915_gem_object_create_stolen(dev_priv, size); + obj = i915_gem_object_create_stolen(i915, size); if (!obj) - obj = i915_gem_object_create_internal(dev_priv, size); + obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) return ERR_CAST(obj); @@ -1258,9 +1308,10 @@ err: struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, - struct i915_timeline *timeline, + struct intel_timeline *timeline, int size) { + struct drm_i915_private *i915 = engine->i915; struct intel_ring *ring; struct i915_vma *vma; @@ -1273,7 +1324,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, kref_init(&ring->ref); INIT_LIST_HEAD(&ring->request_list); - ring->timeline = i915_timeline_get(timeline); + ring->timeline = intel_timeline_get(timeline); ring->size = size; /* Workaround an erratum on the i830 which causes a hang if @@ -1281,12 +1332,12 @@ intel_engine_create_ring(struct intel_engine_cs *engine, * of the buffer. 
*/ ring->effective_size = size; - if (IS_I830(engine->i915) || IS_I845G(engine->i915)) + if (IS_I830(i915) || IS_I845G(i915)) ring->effective_size -= 2 * CACHELINE_BYTES; intel_ring_update_space(ring); - vma = intel_ring_create_vma(engine->i915, size); + vma = create_ring_vma(engine->gt->ggtt, size); if (IS_ERR(vma)) { kfree(ring); return ERR_CAST(vma); @@ -1303,13 +1354,12 @@ void intel_ring_free(struct kref *ref) i915_vma_close(ring->vma); i915_vma_put(ring->vma); - i915_timeline_put(ring->timeline); + intel_timeline_put(ring->timeline); kfree(ring); } static void __ring_context_fini(struct intel_context *ce) { - GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); i915_gem_object_put(ce->state->obj); } @@ -1322,33 +1372,45 @@ static void ring_context_destroy(struct kref *ref) if (ce->state) __ring_context_fini(ce); + intel_context_fini(ce); intel_context_free(ce); } -static int __context_pin_ppgtt(struct i915_gem_context *ctx) +static struct i915_address_space *vm_alias(struct intel_context *ce) +{ + struct i915_address_space *vm; + + vm = ce->vm; + if (i915_is_ggtt(vm)) + vm = &i915_vm_to_ggtt(vm)->alias->vm; + + return vm; +} + +static int __context_pin_ppgtt(struct intel_context *ce) { struct i915_address_space *vm; int err = 0; - vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm; + vm = vm_alias(ce); if (vm) err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm))); return err; } -static void __context_unpin_ppgtt(struct i915_gem_context *ctx) +static void __context_unpin_ppgtt(struct intel_context *ce) { struct i915_address_space *vm; - vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm; + vm = vm_alias(ce); if (vm) gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm)); } static void ring_context_unpin(struct intel_context *ce) { - __context_unpin_ppgtt(ce->gem_context); + __context_unpin_ppgtt(ce); } static struct i915_vma * @@ -1404,7 +1466,7 @@ alloc_context_vma(struct intel_engine_cs *engine) i915_gem_object_unpin_map(obj); } - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -1438,11 +1500,11 @@ static int ring_context_pin(struct intel_context *ce) ce->state = vma; } - err = intel_context_active_acquire(ce, PIN_HIGH); + err = intel_context_active_acquire(ce); if (err) return err; - err = __context_pin_ppgtt(ce->gem_context); + err = __context_pin_ppgtt(ce); if (err) goto err_active; @@ -1484,7 +1546,7 @@ static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt) *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); - *cs++ = ppgtt->pd->base.ggtt_offset << 10; + *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10; intel_ring_advance(rq, cs); @@ -1503,7 +1565,8 @@ static int flush_pd_dir(struct i915_request *rq) /* Stall until the page table load is complete */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); - *cs++ = i915_scratch_offset(rq->i915); + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); *cs++ = MI_NOOP; intel_ring_advance(rq, cs); @@ -1619,7 +1682,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) /* Insert a delay before the next switch! 
*/ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(last_reg); - *cs++ = i915_scratch_offset(rq->i915); + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); *cs++ = MI_NOOP; } *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -1632,7 +1696,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) return 0; } -static int remap_l3(struct i915_request *rq, int slice) +static int remap_l3_slice(struct i915_request *rq, int slice) { u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; int i; @@ -1660,15 +1724,34 @@ static int remap_l3(struct i915_request *rq, int slice) return 0; } +static int remap_l3(struct i915_request *rq) +{ + struct i915_gem_context *ctx = rq->gem_context; + int i, err; + + if (!ctx->remap_slice) + return 0; + + for (i = 0; i < MAX_L3_SLICES; i++) { + if (!(ctx->remap_slice & BIT(i))) + continue; + + err = remap_l3_slice(rq, i); + if (err) + return err; + } + + ctx->remap_slice = 0; + return 0; +} + static int switch_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - struct i915_gem_context *ctx = rq->gem_context; - struct i915_address_space *vm = - ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm; + struct i915_address_space *vm = vm_alias(rq->hw_context); unsigned int unwind_mm = 0; u32 hw_flags = 0; - int ret, i; + int ret; GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); @@ -1712,7 +1795,7 @@ static int switch_context(struct i915_request *rq) * as nothing actually executes using the kernel context; it * is purely used for flushing user contexts. */ - if (i915_gem_context_is_kernel(ctx)) + if (i915_gem_context_is_kernel(rq->gem_context)) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); @@ -1746,18 +1829,9 @@ static int switch_context(struct i915_request *rq) goto err_mm; } - if (ctx->remap_slice) { - for (i = 0; i < MAX_L3_SLICES; i++) { - if (!(ctx->remap_slice & BIT(i))) - continue; - - ret = remap_l3(rq, i); - if (ret) - goto err_mm; - } - - ctx->remap_slice = 0; - } + ret = remap_l3(rq); + if (ret) + goto err_mm; return 0; @@ -2081,10 +2155,11 @@ static void ring_destroy(struct intel_engine_cs *engine) WARN_ON(INTEL_GEN(dev_priv) > 2 && (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + intel_engine_cleanup_common(engine); + intel_ring_unpin(engine->buffer); intel_ring_put(engine->buffer); - intel_engine_cleanup_common(engine); kfree(engine); } @@ -2157,11 +2232,9 @@ static void setup_rcs(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; if (INTEL_GEN(i915) >= 7) { - engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen7_render_ring_flush; engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb; } else if (IS_GEN(i915, 6)) { - engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen6_render_ring_flush; engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb; } else if (IS_GEN(i915, 5)) { @@ -2258,11 +2331,11 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) int intel_ring_submission_init(struct intel_engine_cs *engine) { - struct i915_timeline *timeline; + struct intel_timeline *timeline; struct intel_ring *ring; int err; - timeline = i915_timeline_create(engine->i915, engine->status_page.vma); + timeline = intel_timeline_create(engine->gt, engine->status_page.vma); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); goto err; @@ -2270,7 +2343,7 @@ int intel_ring_submission_init(struct intel_engine_cs *engine) GEM_BUG_ON(timeline->has_initial_breadcrumb); ring = 
intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE); - i915_timeline_put(timeline); + intel_timeline_put(timeline); if (IS_ERR(ring)) { err = PTR_ERR(ring); goto err; diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index c311ce9c6f9d..6daa9eb59e19 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -4,38 +4,36 @@ * Copyright © 2016-2018 Intel Corporation */ +#include "gt/intel_gt_types.h" + #include "i915_drv.h" #include "i915_active.h" #include "i915_syncmap.h" -#include "i915_timeline.h" +#include "gt/intel_timeline.h" #define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) -struct i915_timeline_hwsp { - struct i915_gt_timelines *gt; +struct intel_timeline_hwsp { + struct intel_gt *gt; + struct intel_gt_timelines *gt_timelines; struct list_head free_link; struct i915_vma *vma; u64 free_bitmap; }; -struct i915_timeline_cacheline { +struct intel_timeline_cacheline { struct i915_active active; - struct i915_timeline_hwsp *hwsp; + struct intel_timeline_hwsp *hwsp; void *vaddr; #define CACHELINE_BITS 6 #define CACHELINE_FREE CACHELINE_BITS }; -static inline struct drm_i915_private * -hwsp_to_i915(struct i915_timeline_hwsp *hwsp) -{ - return container_of(hwsp->gt, struct drm_i915_private, gt.timelines); -} - -static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) +static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -45,7 +43,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); if (IS_ERR(vma)) i915_gem_object_put(obj); @@ -53,11 +51,10 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) } static struct i915_vma * -hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) +hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) { - struct drm_i915_private *i915 = timeline->i915; - struct i915_gt_timelines *gt = &i915->gt.timelines; - struct i915_timeline_hwsp *hwsp; + struct intel_gt_timelines *gt = &timeline->gt->timelines; + struct intel_timeline_hwsp *hwsp; BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); @@ -75,16 +72,17 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) if (!hwsp) return ERR_PTR(-ENOMEM); - vma = __hwsp_alloc(i915); + vma = __hwsp_alloc(timeline->gt); if (IS_ERR(vma)) { kfree(hwsp); return vma; } vma->private = hwsp; + hwsp->gt = timeline->gt; hwsp->vma = vma; hwsp->free_bitmap = ~0ull; - hwsp->gt = gt; + hwsp->gt_timelines = gt; spin_lock_irq(>->hwsp_lock); list_add(&hwsp->free_link, >->hwsp_free_list); @@ -102,9 +100,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) return hwsp->vma; } -static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) +static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline) { - struct i915_gt_timelines *gt = hwsp->gt; + struct intel_gt_timelines *gt = hwsp->gt_timelines; unsigned long flags; spin_lock_irqsave(>->hwsp_lock, flags); @@ -126,7 +124,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) spin_unlock_irqrestore(>->hwsp_lock, flags); } -static void __idle_cacheline_free(struct 
i915_timeline_cacheline *cl) +static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) { GEM_BUG_ON(!i915_active_is_idle(&cl->active)); @@ -140,7 +138,7 @@ static void __idle_cacheline_free(struct i915_timeline_cacheline *cl) static void __cacheline_retire(struct i915_active *active) { - struct i915_timeline_cacheline *cl = + struct intel_timeline_cacheline *cl = container_of(active, typeof(*cl), active); i915_vma_unpin(cl->hwsp->vma); @@ -148,10 +146,19 @@ static void __cacheline_retire(struct i915_active *active) __idle_cacheline_free(cl); } -static struct i915_timeline_cacheline * -cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline) +static int __cacheline_active(struct i915_active *active) +{ + struct intel_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + __i915_vma_pin(cl->hwsp->vma); + return 0; +} + +static struct intel_timeline_cacheline * +cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) { - struct i915_timeline_cacheline *cl; + struct intel_timeline_cacheline *cl; void *vaddr; GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); @@ -170,24 +177,25 @@ cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline) cl->hwsp = hwsp; cl->vaddr = page_pack_bits(vaddr, cacheline); - i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire); + i915_active_init(hwsp->gt->i915, &cl->active, + __cacheline_active, __cacheline_retire); return cl; } -static void cacheline_acquire(struct i915_timeline_cacheline *cl) +static void cacheline_acquire(struct intel_timeline_cacheline *cl) { - if (cl && i915_active_acquire(&cl->active)) - __i915_vma_pin(cl->hwsp->vma); + if (cl) + i915_active_acquire(&cl->active); } -static void cacheline_release(struct i915_timeline_cacheline *cl) +static void cacheline_release(struct intel_timeline_cacheline *cl) { if (cl) i915_active_release(&cl->active); } -static void cacheline_free(struct i915_timeline_cacheline *cl) +static void cacheline_free(struct intel_timeline_cacheline *cl) { GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); @@ -196,29 +204,22 @@ static void cacheline_free(struct i915_timeline_cacheline *cl) __idle_cacheline_free(cl); } -int i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *timeline, - struct i915_vma *hwsp) +int intel_timeline_init(struct intel_timeline *timeline, + struct intel_gt *gt, + struct i915_vma *hwsp) { void *vaddr; - /* - * Ideally we want a set of engines on a single leaf as we expect - * to mostly be tracking synchronisation between engines. It is not - * a huge issue if this is not the case, but we may want to mitigate - * any page crossing penalties if they become an issue. - * - * Called during early_init before we know how many engines there are. 
- */ - BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); + kref_init(&timeline->kref); - timeline->i915 = i915; + timeline->gt = gt; timeline->pin_count = 0; + timeline->has_initial_breadcrumb = !hwsp; timeline->hwsp_cacheline = NULL; if (!hwsp) { - struct i915_timeline_cacheline *cl; + struct intel_timeline_cacheline *cl; unsigned int cacheline; hwsp = hwsp_alloc(timeline, &cacheline); @@ -261,55 +262,47 @@ int i915_timeline_init(struct drm_i915_private *i915, return 0; } -void i915_timelines_init(struct drm_i915_private *i915) +static void timelines_init(struct intel_gt *gt) { - struct i915_gt_timelines *gt = &i915->gt.timelines; + struct intel_gt_timelines *timelines = >->timelines; - mutex_init(>->mutex); - INIT_LIST_HEAD(>->active_list); + mutex_init(&timelines->mutex); + INIT_LIST_HEAD(&timelines->active_list); - spin_lock_init(>->hwsp_lock); - INIT_LIST_HEAD(>->hwsp_free_list); + spin_lock_init(&timelines->hwsp_lock); + INIT_LIST_HEAD(&timelines->hwsp_free_list); +} - /* via i915_gem_wait_for_idle() */ - i915_gem_shrinker_taints_mutex(i915, >->mutex); +void intel_timelines_init(struct drm_i915_private *i915) +{ + timelines_init(&i915->gt); } -static void timeline_add_to_active(struct i915_timeline *tl) +static void timeline_add_to_active(struct intel_timeline *tl) { - struct i915_gt_timelines *gt = &tl->i915->gt.timelines; + struct intel_gt_timelines *gt = &tl->gt->timelines; mutex_lock(>->mutex); list_add(&tl->link, >->active_list); mutex_unlock(>->mutex); } -static void timeline_remove_from_active(struct i915_timeline *tl) +static void timeline_remove_from_active(struct intel_timeline *tl) { - struct i915_gt_timelines *gt = &tl->i915->gt.timelines; + struct intel_gt_timelines *gt = &tl->gt->timelines; mutex_lock(>->mutex); list_del(&tl->link); mutex_unlock(>->mutex); } -/** - * i915_timelines_park - called when the driver idles - * @i915: the drm_i915_private device - * - * When the driver is completely idle, we know that all of our sync points - * have been signaled and our tracking is then entirely redundant. Any request - * to wait upon an older sync point will be completed instantly as we know - * the fence is signaled and therefore we will not even look them up in the - * sync point map. - */ -void i915_timelines_park(struct drm_i915_private *i915) +static void timelines_park(struct intel_gt *gt) { - struct i915_gt_timelines *gt = &i915->gt.timelines; - struct i915_timeline *timeline; + struct intel_gt_timelines *timelines = >->timelines; + struct intel_timeline *timeline; - mutex_lock(>->mutex); - list_for_each_entry(timeline, >->active_list, link) { + mutex_lock(&timelines->mutex); + list_for_each_entry(timeline, &timelines->active_list, link) { /* * All known fences are completed so we can scrap * the current sync point tracking and start afresh, @@ -318,10 +311,25 @@ void i915_timelines_park(struct drm_i915_private *i915) */ i915_syncmap_free(&timeline->sync); } - mutex_unlock(>->mutex); + mutex_unlock(&timelines->mutex); } -void i915_timeline_fini(struct i915_timeline *timeline) +/** + * intel_timelines_park - called when the driver idles + * @i915: the drm_i915_private device + * + * When the driver is completely idle, we know that all of our sync points + * have been signaled and our tracking is then entirely redundant. Any request + * to wait upon an older sync point will be completed instantly as we know + * the fence is signaled and therefore we will not even look them up in the + * sync point map. 
+ */ +void intel_timelines_park(struct drm_i915_private *i915) +{ + timelines_park(&i915->gt); +} + +void intel_timeline_fini(struct intel_timeline *timeline) { GEM_BUG_ON(timeline->pin_count); GEM_BUG_ON(!list_empty(&timeline->requests)); @@ -336,29 +344,26 @@ void i915_timeline_fini(struct i915_timeline *timeline) i915_vma_put(timeline->hwsp_ggtt); } -struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, - struct i915_vma *global_hwsp) +struct intel_timeline * +intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) { - struct i915_timeline *timeline; + struct intel_timeline *timeline; int err; timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); if (!timeline) return ERR_PTR(-ENOMEM); - err = i915_timeline_init(i915, timeline, global_hwsp); + err = intel_timeline_init(timeline, gt, global_hwsp); if (err) { kfree(timeline); return ERR_PTR(err); } - kref_init(&timeline->kref); - return timeline; } -int i915_timeline_pin(struct i915_timeline *tl) +int intel_timeline_pin(struct intel_timeline *tl) { int err; @@ -384,7 +389,7 @@ unpin: return err; } -static u32 timeline_advance(struct i915_timeline *tl) +static u32 timeline_advance(struct intel_timeline *tl) { GEM_BUG_ON(!tl->pin_count); GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); @@ -392,17 +397,17 @@ static u32 timeline_advance(struct i915_timeline *tl) return tl->seqno += 1 + tl->has_initial_breadcrumb; } -static void timeline_rollback(struct i915_timeline *tl) +static void timeline_rollback(struct intel_timeline *tl) { tl->seqno -= 1 + tl->has_initial_breadcrumb; } static noinline int -__i915_timeline_get_seqno(struct i915_timeline *tl, - struct i915_request *rq, - u32 *seqno) +__intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno) { - struct i915_timeline_cacheline *cl; + struct intel_timeline_cacheline *cl; unsigned int cacheline; struct i915_vma *vma; void *vaddr; @@ -488,31 +493,31 @@ err_rollback: return err; } -int i915_timeline_get_seqno(struct i915_timeline *tl, - struct i915_request *rq, - u32 *seqno) +int intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno) { *seqno = timeline_advance(tl); /* Replace the HWSP on wraparound for HW semaphores */ if (unlikely(!*seqno && tl->hwsp_cacheline)) - return __i915_timeline_get_seqno(tl, rq, seqno); + return __intel_timeline_get_seqno(tl, rq, seqno); return 0; } -static int cacheline_ref(struct i915_timeline_cacheline *cl, +static int cacheline_ref(struct intel_timeline_cacheline *cl, struct i915_request *rq) { return i915_active_ref(&cl->active, rq->fence.context, rq); } -int i915_timeline_read_hwsp(struct i915_request *from, - struct i915_request *to, - u32 *hwsp) +int intel_timeline_read_hwsp(struct i915_request *from, + struct i915_request *to, + u32 *hwsp) { - struct i915_timeline_cacheline *cl = from->hwsp_cacheline; - struct i915_timeline *tl = from->timeline; + struct intel_timeline_cacheline *cl = from->hwsp_cacheline; + struct intel_timeline *tl = from->timeline; int err; GEM_BUG_ON(to->timeline == tl); @@ -535,7 +540,7 @@ int i915_timeline_read_hwsp(struct i915_request *from, return err; } -void i915_timeline_unpin(struct i915_timeline *tl) +void intel_timeline_unpin(struct intel_timeline *tl) { GEM_BUG_ON(!tl->pin_count); if (--tl->pin_count) @@ -554,26 +559,31 @@ void i915_timeline_unpin(struct i915_timeline *tl) __i915_vma_unpin(tl->hwsp_ggtt); } -void __i915_timeline_free(struct kref *kref) +void __intel_timeline_free(struct kref *kref) { - struct 
i915_timeline *timeline = + struct intel_timeline *timeline = container_of(kref, typeof(*timeline), kref); - i915_timeline_fini(timeline); + intel_timeline_fini(timeline); kfree(timeline); } -void i915_timelines_fini(struct drm_i915_private *i915) +static void timelines_fini(struct intel_gt *gt) { - struct i915_gt_timelines *gt = &i915->gt.timelines; + struct intel_gt_timelines *timelines = >->timelines; - GEM_BUG_ON(!list_empty(>->active_list)); - GEM_BUG_ON(!list_empty(>->hwsp_free_list)); + GEM_BUG_ON(!list_empty(&timelines->active_list)); + GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); - mutex_destroy(>->mutex); + mutex_destroy(&timelines->mutex); +} + +void intel_timelines_fini(struct drm_i915_private *i915) +{ + timelines_fini(&i915->gt); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/mock_timeline.c" -#include "selftests/i915_timeline.c" +#include "gt/selftests/mock_timeline.c" +#include "gt/selftest_timeline.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h new file mode 100644 index 000000000000..e08cebf64833 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -0,0 +1,93 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef I915_TIMELINE_H +#define I915_TIMELINE_H + +#include <linux/lockdep.h> + +#include "i915_active.h" +#include "i915_syncmap.h" +#include "gt/intel_timeline_types.h" + +int intel_timeline_init(struct intel_timeline *tl, + struct intel_gt *gt, + struct i915_vma *hwsp); +void intel_timeline_fini(struct intel_timeline *tl); + +struct intel_timeline * +intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp); + +static inline struct intel_timeline * +intel_timeline_get(struct intel_timeline *timeline) +{ + kref_get(&timeline->kref); + return timeline; +} + +void __intel_timeline_free(struct kref *kref); +static inline void intel_timeline_put(struct intel_timeline *timeline) +{ + kref_put(&timeline->kref, __intel_timeline_free); +} + +static inline int __intel_timeline_sync_set(struct intel_timeline *tl, + u64 context, u32 seqno) +{ + return i915_syncmap_set(&tl->sync, context, seqno); +} + +static inline int intel_timeline_sync_set(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_set(tl, fence->context, fence->seqno); +} + +static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl, + u64 context, u32 seqno) +{ + return i915_syncmap_is_later(&tl->sync, context, seqno); +} + +static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno); +} + +int intel_timeline_pin(struct intel_timeline *tl); +int intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno); +void intel_timeline_unpin(struct intel_timeline *tl); + +int intel_timeline_read_hwsp(struct i915_request *from, + struct i915_request *until, + u32 *hwsp_offset); + +void intel_timelines_init(struct drm_i915_private *i915); +void intel_timelines_park(struct drm_i915_private *i915); +void intel_timelines_fini(struct drm_i915_private *i915); + +#endif diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h index fce5cb4f1090..9a71aea7a338 100644 --- a/drivers/gpu/drm/i915/i915_timeline_types.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -16,10 +16,10 @@ struct drm_i915_private; struct i915_vma; -struct i915_timeline_cacheline; +struct intel_timeline_cacheline; struct i915_syncmap; -struct i915_timeline { +struct intel_timeline { u64 fence_context; u32 seqno; @@ -30,7 +30,7 @@ struct i915_timeline { struct i915_vma *hwsp_ggtt; u32 hwsp_offset; - struct i915_timeline_cacheline *hwsp_cacheline; + struct intel_timeline_cacheline *hwsp_cacheline; bool has_initial_breadcrumb; @@ -59,7 +59,7 @@ struct i915_timeline { struct i915_syncmap *sync; struct list_head link; - struct drm_i915_private *i915; + struct intel_gt *gt; struct kref kref; }; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 15e90fd2cfdc..704ace01e7f5 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "intel_context.h" +#include "intel_gt.h" #include "intel_workarounds.h" /** @@ -49,9 +50,10 @@ * - Public functions to init or apply the given workaround type. 
*/ -static void wa_init_start(struct i915_wa_list *wal, const char *name) +static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) { wal->name = name; + wal->engine_name = engine_name; } #define WA_LIST_CHUNK (1 << 4) @@ -73,8 +75,8 @@ static void wa_init_finish(struct i915_wa_list *wal) if (!wal->count) return; - DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n", - wal->wa_count, wal->name); + DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n", + wal->wa_count, wal->name, wal->engine_name); } static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) @@ -175,19 +177,6 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) wa_write_masked_or(wal, reg, val, val); } -static void -ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) -{ - struct i915_wa wa = { - .reg = reg, - .mask = mask, - .val = val, - /* Bonkers HW, skip verifying */ - }; - - _wa_add(wal, &wa); -} - #define WA_SET_BIT_MASKED(addr, mask) \ wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask)) @@ -536,12 +525,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) | GEN8_ERRDETBCTRL); - /* WaDisableBankHangMode:icl */ - wa_write(wal, - GEN8_L3CNTLREG, - intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) | - GEN8_ERRDETBCTRL); - /* Wa_1604370585:icl (pre-prod) * Formerly known as WaPushConstantDereferenceHoldDisable */ @@ -596,7 +579,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (engine->class != RENDER_CLASS) return; - wa_init_start(wal, name); + wa_init_start(wal, name, engine->name); if (IS_GEN(i915, 11)) icl_ctx_workarounds_init(engine, wal); @@ -766,7 +749,10 @@ static void wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; - u32 mcr_slice_subslice_mask; + unsigned int slice, subslice; + u32 l3_en, mcr, mcr_mask; + + GEM_BUG_ON(INTEL_GEN(i915) < 10); /* * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl @@ -774,42 +760,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) * the case, we might need to program MCR select to a valid L3Bank * by default, to make sure we correctly read certain registers * later on (in the range 0xB100 - 0xB3FF). - * This might be incompatible with - * WaProgramMgsrForCorrectSliceSpecificMmioReads. - * Fortunately, this should not happen in production hardware, so - * we only assert that this is the case (instead of implementing - * something more complex that requires checking the range of every - * MMIO read). 
- */ - if (INTEL_GEN(i915) >= 10 && - is_power_of_2(sseu->slice_mask)) { - /* - * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches - * enabled subslice, no need to redirect MCR packet - */ - u32 slice = fls(sseu->slice_mask); - u32 fuse3 = - intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3); - u8 ss_mask = sseu->subslice_mask[slice]; - - u8 enabled_mask = (ss_mask | ss_mask >> - GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK; - u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK; - - /* - * Production silicon should have matched L3Bank and - * subslice enabled - */ - WARN_ON((enabled_mask & disabled_mask) != enabled_mask); - } - - if (INTEL_GEN(i915) >= 11) - mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | - GEN11_MCR_SUBSLICE_MASK; - else - mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK | - GEN8_MCR_SUBSLICE_MASK; - /* + * * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl * Before any MMIO read into slice/subslice specific registers, MCR * packet control register needs to be programmed to point to any @@ -819,11 +770,51 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) * are consistent across s/ss in almost all cases. In the rare * occasions, such as INSTDONE, where this value is dependent * on s/ss combo, the read should be done with read_subslice_reg. + * + * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both + * to which subslice, or to which L3 bank, the respective mmio reads + * will go, we have to find a common index which works for both + * accesses. + * + * Case where we cannot find a common index fortunately should not + * happen in production hardware, so we only emit a warning instead of + * implementing something more complex that requires checking the range + * of every MMIO read. */ - wa_write_masked_or(wal, - GEN8_MCR_SELECTOR, - mcr_slice_subslice_mask, - intel_calculate_mcr_s_ss_select(i915)); + + if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) { + u32 l3_fuse = + intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) & + GEN10_L3BANK_MASK; + + DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse); + l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse); + } else { + l3_en = ~0; + } + + slice = fls(sseu->slice_mask) - 1; + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = fls(l3_en & sseu->subslice_mask[slice]); + if (!subslice) { + DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", + sseu->subslice_mask[slice], l3_en); + subslice = fls(l3_en); + WARN_ON(!subslice); + } + subslice--; + + if (INTEL_GEN(i915) >= 11) { + mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice); + mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; + } else { + mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + } + + DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr); + + wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); } static void @@ -926,7 +917,7 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915) { struct i915_wa_list *wal = &i915->gt_wa_list; - wa_init_start(wal, "GT"); + wa_init_start(wal, "GT", "global"); gt_init_workarounds(i915, wal); wa_init_finish(wal); } @@ -990,9 +981,9 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal) spin_unlock_irqrestore(&uncore->lock, flags); } -void intel_gt_apply_workarounds(struct drm_i915_private *i915) +void intel_gt_apply_workarounds(struct intel_gt *gt) { - wa_list_apply(&i915->uncore, &i915->gt_wa_list); + 
wa_list_apply(gt->uncore, >->i915->gt_wa_list); } static bool wa_list_verify(struct intel_uncore *uncore, @@ -1011,10 +1002,23 @@ static bool wa_list_verify(struct intel_uncore *uncore, return ok; } -bool intel_gt_verify_workarounds(struct drm_i915_private *i915, - const char *from) +bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from) { - return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from); + return wa_list_verify(gt->uncore, >->i915->gt_wa_list, from); +} + +static inline bool is_nonpriv_flags_valid(u32 flags) +{ + /* Check only valid flag bits are set */ + if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID) + return false; + + /* NB: Only 3 out of 4 enum values are valid for access field */ + if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_INVALID) + return false; + + return true; } static void @@ -1027,6 +1031,9 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags) if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS)) return; + if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags))) + return; + wa.reg.reg |= flags; _wa_add(wal, &wa); } @@ -1034,7 +1041,7 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags) static void whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg) { - whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW); + whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW); } static void gen9_whitelist_build(struct i915_wa_list *w) @@ -1098,10 +1105,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine) static void cfl_whitelist_build(struct intel_engine_cs *engine) { + struct i915_wa_list *w = &engine->whitelist; + if (engine->class != RENDER_CLASS) return; - gen9_whitelist_build(&engine->whitelist); + gen9_whitelist_build(w); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); } static void cnl_whitelist_build(struct intel_engine_cs *engine) @@ -1129,18 +1151,31 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) /* WaEnableStateCacheRedirectToCS:icl */ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); break; case VIDEO_DECODE_CLASS: /* hucStatusRegOffset */ whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base), - RING_FORCE_TO_NONPRIV_RD); + RING_FORCE_TO_NONPRIV_ACCESS_RD); /* hucUKernelHdrInfoRegOffset */ whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base), - RING_FORCE_TO_NONPRIV_RD); + RING_FORCE_TO_NONPRIV_ACCESS_RD); /* hucStatus2RegOffset */ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base), - RING_FORCE_TO_NONPRIV_RD); + RING_FORCE_TO_NONPRIV_ACCESS_RD); break; default: @@ -1153,7 +1188,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; struct i915_wa_list *w = &engine->whitelist; - wa_init_start(w, "whitelist"); + wa_init_start(w, "whitelist", engine->name); if (IS_GEN(i915, 11)) icl_whitelist_build(engine); @@ 
-1212,10 +1247,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); /* WaPipelineFlushCoherentLines:icl */ - ignore_wa_write_or(wal, - GEN8_L3SQCREG4, - GEN8_LQSC_FLUSH_COHERENT_LINES, - GEN8_LQSC_FLUSH_COHERENT_LINES); + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES); /* * Wa_1405543622:icl @@ -1242,10 +1276,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * Wa_1405733216:icl * Formerly known as WaDisableCleanEvicts */ - ignore_wa_write_or(wal, - GEN8_L3SQCREG4, - GEN11_LQSC_CLEAN_EVICT_DISABLE, - GEN11_LQSC_CLEAN_EVICT_DISABLE); + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN11_LQSC_CLEAN_EVICT_DISABLE); /* WaForwardProgressSoftReset:icl */ wa_write_or(wal, @@ -1258,8 +1291,18 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) wa_write_or(wal, GEN7_SARCHKMD, - GEN7_DISABLE_DEMAND_PREFETCH | - GEN7_DISABLE_SAMPLER_PREFETCH); + GEN7_DISABLE_DEMAND_PREFETCH); + + /* Wa_1606682166:icl */ + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_SAMPLER_PREFETCH); + + /* Wa_1409178092:icl */ + wa_write_masked_or(wal, + GEN11_SCRATCH2, + GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE, + 0); } if (IS_GEN_RANGE(i915, 9, 11)) { @@ -1328,7 +1371,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) return; - if (engine->id == RCS0) + if (engine->class == RENDER_CLASS) rcs_engine_wa_init(engine, wal); else xcs_engine_wa_init(engine, wal); @@ -1338,10 +1381,10 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) { struct i915_wa_list *wal = &engine->wa_list; - if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8)) + if (INTEL_GEN(engine->i915) < 8) return; - wa_init_start(wal, engine->name); + wa_init_start(wal, "engine", engine->name); engine_init_workarounds(engine, wal); wa_init_finish(wal); } @@ -1384,26 +1427,50 @@ err_obj: return ERR_PTR(err); } +static bool mcr_range(struct drm_i915_private *i915, u32 offset) +{ + /* + * Registers in this range are affected by the MCR selector + * which only controls CPU initiated MMIO. Routing does not + * work for CS access so we cannot verify them on this path. 
+ */ + if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff)) + return true; + + return false; +} + static int wa_list_srm(struct i915_request *rq, const struct i915_wa_list *wal, struct i915_vma *vma) { + struct drm_i915_private *i915 = rq->i915; + unsigned int i, count = 0; const struct i915_wa *wa; - unsigned int i; u32 srm, *cs; srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (INTEL_GEN(rq->i915) >= 8) + if (INTEL_GEN(i915) >= 8) srm++; - cs = intel_ring_begin(rq, 4 * wal->count); + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg))) + count++; + } + + cs = intel_ring_begin(rq, 4 * count); if (IS_ERR(cs)) return PTR_ERR(cs); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + u32 offset = i915_mmio_reg_offset(wa->reg); + + if (mcr_range(i915, offset)) + continue; + *cs++ = srm; - *cs++ = i915_mmio_reg_offset(wa->reg); + *cs++ = offset; *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; *cs++ = 0; } @@ -1426,7 +1493,7 @@ static int engine_wa_list_verify(struct intel_context *ce, if (!wal->count) return 0; - vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count); + vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -1453,9 +1520,13 @@ static int engine_wa_list_verify(struct intel_context *ce, } err = 0; - for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg))) + continue; + if (!wa_verify(wa, results[i], wal->name, from)) err = -ENXIO; + } i915_gem_object_unpin_map(vma->obj); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h index 3761a6ee58bb..8c9c769c2204 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.h +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h @@ -14,6 +14,7 @@ struct drm_i915_private; struct i915_request; struct intel_engine_cs; +struct intel_gt; static inline void intel_wa_list_free(struct i915_wa_list *wal) { @@ -25,9 +26,8 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine); int intel_engine_emit_ctx_wa(struct i915_request *rq); void intel_gt_init_workarounds(struct drm_i915_private *i915); -void intel_gt_apply_workarounds(struct drm_i915_private *i915); -bool intel_gt_verify_workarounds(struct drm_i915_private *i915, - const char *from); +void intel_gt_apply_workarounds(struct intel_gt *gt); +bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from); void intel_engine_init_whitelist(struct intel_engine_cs *engine); void intel_engine_apply_whitelist(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h index 42ac1fb99572..e27ab1b710b3 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h +++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h @@ -20,6 +20,7 @@ struct i915_wa { struct i915_wa_list { const char *name; + const char *engine_name; struct i915_wa *list; unsigned int count; unsigned int wa_count; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 086801b51441..10cb312462e5 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -33,15 +33,15 @@ struct mock_ring { struct intel_ring base; - struct i915_timeline timeline; + struct intel_timeline timeline; }; -static void mock_timeline_pin(struct i915_timeline *tl) +static void 
mock_timeline_pin(struct intel_timeline *tl) { tl->pin_count++; } -static void mock_timeline_unpin(struct i915_timeline *tl) +static void mock_timeline_unpin(struct intel_timeline *tl) { GEM_BUG_ON(!tl->pin_count); tl->pin_count--; @@ -56,7 +56,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) if (!ring) return NULL; - if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) { + if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) { kfree(ring); return NULL; } @@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->base.effective_size = sz; ring->base.vaddr = (void *)(ring + 1); ring->base.timeline = &ring->timeline; + atomic_set(&ring->base.pin_count, 1); INIT_LIST_HEAD(&ring->base.request_list); intel_ring_update_space(&ring->base); @@ -77,7 +78,7 @@ static void mock_ring_free(struct intel_ring *base) { struct mock_ring *ring = container_of(base, typeof(*ring), base); - i915_timeline_fini(&ring->timeline); + intel_timeline_fini(&ring->timeline); kfree(ring); } @@ -141,6 +142,7 @@ static void mock_context_destroy(struct kref *ref) if (ce->ring) mock_ring_free(ce->ring); + intel_context_fini(ce); intel_context_free(ce); } @@ -154,7 +156,7 @@ static int mock_context_pin(struct intel_context *ce) return -ENOMEM; } - ret = intel_context_active_acquire(ce, PIN_HIGH); + ret = intel_context_active_acquire(ce); if (ret) return ret; @@ -256,6 +258,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, /* minimal engine setup for requests */ engine->base.i915 = i915; + engine->base.gt = &i915->gt; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; engine->base.mask = BIT(id); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 1ee4c923044f..4484b4447db1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -25,13 +25,13 @@ #include <linux/kthread.h> #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" #include "intel_engine_pm.h" #include "i915_selftest.h" #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_reset.h" -#include "selftests/igt_wedge_me.h" #include "selftests/igt_atomic.h" #include "selftests/mock_drm.h" @@ -42,7 +42,7 @@ #define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */ struct hang { - struct drm_i915_private *i915; + struct intel_gt *gt; struct drm_i915_gem_object *hws; struct drm_i915_gem_object *obj; struct i915_gem_context *ctx; @@ -50,27 +50,27 @@ struct hang { u32 *batch; }; -static int hang_init(struct hang *h, struct drm_i915_private *i915) +static int hang_init(struct hang *h, struct intel_gt *gt) { void *vaddr; int err; memset(h, 0, sizeof(*h)); - h->i915 = i915; + h->gt = gt; - h->ctx = kernel_context(i915); + h->ctx = kernel_context(gt->i915); if (IS_ERR(h->ctx)) return PTR_ERR(h->ctx); GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); - h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); + h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(h->hws)) { err = PTR_ERR(h->hws); goto err_ctx; } - h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(h->obj)) { err = PTR_ERR(h->obj); goto err_hws; @@ -85,7 +85,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) h->seqno = memset(vaddr, 0xff, PAGE_SIZE); vaddr = 
i915_gem_object_pin_map(h->obj, - i915_coherent_map_type(i915)); + i915_coherent_map_type(gt->i915)); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto err_unpin_hws; @@ -127,35 +127,31 @@ static int move_to_active(struct i915_vma *vma, static struct i915_request * hang_create_request(struct hang *h, struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = h->i915; - struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm; + struct intel_gt *gt = h->gt; + struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm; + struct drm_i915_gem_object *obj; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; unsigned int flags; + void *vaddr; u32 *batch; int err; - if (i915_gem_object_is_active(h->obj)) { - struct drm_i915_gem_object *obj; - void *vaddr; - - obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); - vaddr = i915_gem_object_pin_map(obj, - i915_coherent_map_type(h->i915)); - if (IS_ERR(vaddr)) { - i915_gem_object_put(obj); - return ERR_CAST(vaddr); - } + vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915)); + if (IS_ERR(vaddr)) { + i915_gem_object_put(obj); + return ERR_CAST(vaddr); + } - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); + i915_gem_object_unpin_map(h->obj); + i915_gem_object_put(h->obj); - h->obj = obj; - h->batch = vaddr; - } + h->obj = obj; + h->batch = vaddr; vma = i915_vma_instance(h->obj, vm, NULL); if (IS_ERR(vma)) @@ -188,7 +184,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) goto cancel_rq; batch = h->batch; - if (INTEL_GEN(i915) >= 8) { + if (INTEL_GEN(gt->i915) >= 8) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = upper_32_bits(hws_address(hws, rq)); @@ -202,7 +198,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; *batch++ = lower_32_bits(vma->node.start); *batch++ = upper_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 6) { + } else if (INTEL_GEN(gt->i915) >= 6) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); @@ -215,7 +211,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = MI_ARB_CHECK; *batch++ = MI_BATCH_BUFFER_START | 1 << 8; *batch++ = lower_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 4) { + } else if (INTEL_GEN(gt->i915) >= 4) { *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); @@ -242,7 +238,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = lower_32_bits(vma->node.start); } *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - i915_gem_chipset_flush(h->i915); + intel_gt_chipset_flush(engine->gt); if (rq->engine->emit_init_breadcrumb) { err = rq->engine->emit_init_breadcrumb(rq); @@ -251,7 +247,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) } flags = 0; - if (INTEL_GEN(vm->i915) <= 5) + if (INTEL_GEN(gt->i915) <= 5) flags |= I915_DISPATCH_SECURE; err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); @@ -276,7 +272,7 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) static void hang_fini(struct hang *h) { *h->batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(h->i915); + intel_gt_chipset_flush(h->gt); 
i915_gem_object_unpin_map(h->obj); i915_gem_object_put(h->obj); @@ -286,7 +282,7 @@ static void hang_fini(struct hang *h) kernel_context_close(h->ctx); - igt_flush_test(h->i915, I915_WAIT_LOCKED); + igt_flush_test(h->gt->i915, I915_WAIT_LOCKED); } static bool wait_until_running(struct hang *h, struct i915_request *rq) @@ -301,7 +297,7 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq) static int igt_hang_sanitycheck(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_request *rq; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -310,13 +306,13 @@ static int igt_hang_sanitycheck(void *arg) /* Basic check that we can execute our hanging batch */ - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - for_each_engine(engine, i915, id) { - struct igt_wedge_me w; + for_each_engine(engine, gt->i915, id) { + struct intel_wedge_me w; long timeout; if (!intel_engine_can_store_dword(engine)) @@ -333,15 +329,15 @@ static int igt_hang_sanitycheck(void *arg) i915_request_get(rq); *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(engine->gt); i915_request_add(rq); timeout = 0; - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) + intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */) timeout = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) timeout = -EIO; i915_request_put(rq); @@ -357,7 +353,7 @@ static int igt_hang_sanitycheck(void *arg) fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); return err; } @@ -368,37 +364,37 @@ static bool wait_for_idle(struct intel_engine_cs *engine) static int igt_reset_nop(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; struct i915_gem_context *ctx; unsigned int reset_count, count; enum intel_engine_id id; - intel_wakeref_t wakeref; struct drm_file *file; IGT_TIMEOUT(end_time); int err = 0; /* Check that we can reset during non-user portions of requests */ - file = mock_file(i915); + file = mock_file(gt->i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + ctx = live_context(gt->i915, file); + mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - reset_count = i915_reset_count(&i915->gpu_error); + reset_count = i915_reset_count(global); count = 0; do { - mutex_lock(&i915->drm.struct_mutex); - for_each_engine(engine, i915, id) { + mutex_lock(>->i915->drm.struct_mutex); + + for_each_engine(engine, gt->i915, id) { int i; for (i = 0; i < 16; i++) { @@ -413,82 +409,78 @@ static int igt_reset_nop(void *arg) i915_request_add(rq); } } - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_lock(i915); - i915_reset(i915, ALL_ENGINES, NULL); - igt_global_reset_unlock(i915); - if (i915_reset_failed(i915)) { + igt_global_reset_lock(gt); + intel_gt_reset(gt, ALL_ENGINES, NULL); + igt_global_reset_unlock(gt); + + mutex_unlock(>->i915->drm.struct_mutex); + if (intel_gt_is_wedged(gt)) { err = -EIO; break; } - if (i915_reset_count(&i915->gpu_error) 
!= - reset_count + ++count) { + if (i915_reset_count(global) != reset_count + ++count) { pr_err("Full GPU reset not recorded!\n"); err = -EINVAL; break; } - err = igt_flush_test(i915, 0); + err = igt_flush_test(gt->i915, 0); if (err) break; } while (time_before(jiffies, end_time)); pr_info("%s: %d resets\n", __func__, count); - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_lock(>->i915->drm.struct_mutex); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(>->i915->drm.struct_mutex); out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) + mock_file_free(gt->i915, file); + if (intel_gt_is_wedged(gt)) err = -EIO; return err; } static int igt_reset_nop_engine(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; - intel_wakeref_t wakeref; struct drm_file *file; int err = 0; /* Check that we can engine-reset during non-user portions */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; - file = mock_file(i915); + file = mock_file(gt->i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + ctx = live_context(gt->i915, file); + mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { unsigned int reset_count, reset_engine_count; unsigned int count; IGT_TIMEOUT(end_time); - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); + reset_count = i915_reset_count(global); + reset_engine_count = i915_reset_engine_count(global, engine); count = 0; - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { int i; @@ -499,7 +491,7 @@ static int igt_reset_nop_engine(void *arg) break; } - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); for (i = 0; i < 16; i++) { struct i915_request *rq; @@ -511,21 +503,20 @@ static int igt_reset_nop_engine(void *arg) i915_request_add(rq); } - mutex_unlock(&i915->drm.struct_mutex); - - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); + mutex_unlock(>->i915->drm.struct_mutex); if (err) { pr_err("i915_reset_engine failed\n"); break; } - if (i915_reset_count(&i915->gpu_error) != reset_count) { + if (i915_reset_count(global) != reset_count) { pr_err("Full GPU reset recorded! 
(engine reset expected)\n"); err = -EINVAL; break; } - if (i915_reset_engine_count(&i915->gpu_error, engine) != + if (i915_reset_engine_count(global, engine) != reset_engine_count + ++count) { pr_err("%s engine reset not recorded!\n", engine->name); @@ -533,31 +524,31 @@ static int igt_reset_nop_engine(void *arg) break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); pr_info("%s(%s): %d resets\n", __func__, engine->name, count); if (err) break; - err = igt_flush_test(i915, 0); + err = igt_flush_test(gt->i915, 0); if (err) break; } - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(>->i915->drm.struct_mutex); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) + mock_file_free(gt->i915, file); + if (intel_gt_is_wedged(gt)) err = -EIO; return err; } -static int __igt_reset_engine(struct drm_i915_private *i915, bool active) +static int __igt_reset_engine(struct intel_gt *gt, bool active) { + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; struct hang h; @@ -565,18 +556,18 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) /* Check that we can issue an engine reset on an idle engine (no-op) */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; if (active) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); + mutex_unlock(>->i915->drm.struct_mutex); if (err) return err; } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { unsigned int reset_count, reset_engine_count; IGT_TIMEOUT(end_time); @@ -590,30 +581,29 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); + reset_count = i915_reset_count(global); + reset_engine_count = i915_reset_engine_count(global, engine); intel_engine_pm_get(engine); - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { if (active) { struct i915_request *rq; - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); break; } i915_request_get(rq); i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); @@ -628,19 +618,19 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) i915_request_put(rq); } - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); if (err) { pr_err("i915_reset_engine failed\n"); break; } - if (i915_reset_count(&i915->gpu_error) != reset_count) { + if (i915_reset_count(global) != 
reset_count) { pr_err("Full GPU reset recorded! (engine reset expected)\n"); err = -EINVAL; break; } - if (i915_reset_engine_count(&i915->gpu_error, engine) != + if (i915_reset_engine_count(global, engine) != ++reset_engine_count) { pr_err("%s engine reset not recorded!\n", engine->name); @@ -648,24 +638,24 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); intel_engine_pm_put(engine); if (err) break; - err = igt_flush_test(i915, 0); + err = igt_flush_test(gt->i915, 0); if (err) break; } - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) err = -EIO; if (active) { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); } return err; @@ -707,7 +697,7 @@ static int active_request_put(struct i915_request *rq) rq->fence.seqno); GEM_TRACE_DUMP(); - i915_gem_set_wedged(rq->i915); + intel_gt_set_wedged(rq->engine->gt); err = -EIO; } @@ -784,10 +774,11 @@ err_file: return err; } -static int __igt_reset_engines(struct drm_i915_private *i915, +static int __igt_reset_engines(struct intel_gt *gt, const char *test_name, unsigned int flags) { + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine, *other; enum intel_engine_id id, tmp; struct hang h; @@ -797,13 +788,13 @@ static int __igt_reset_engines(struct drm_i915_private *i915, * with any other engine. */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); + mutex_unlock(>->i915->drm.struct_mutex); if (err) return err; @@ -811,9 +802,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915, h.ctx->sched.priority = 1024; } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { struct active_engine threads[I915_NUM_ENGINES] = {}; - unsigned long global = i915_reset_count(&i915->gpu_error); + unsigned long device = i915_reset_count(global); unsigned long count = 0, reported; IGT_TIMEOUT(end_time); @@ -829,12 +820,11 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } memset(threads, 0, sizeof(threads)); - for_each_engine(other, i915, tmp) { + for_each_engine(other, gt->i915, tmp) { struct task_struct *tsk; threads[tmp].resets = - i915_reset_engine_count(&i915->gpu_error, - other); + i915_reset_engine_count(global, other); if (!(flags & TEST_OTHERS)) continue; @@ -857,25 +847,25 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } intel_engine_pm_get(engine); - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { struct i915_request *rq = NULL; if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); break; } i915_request_get(rq); i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer 
p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); @@ -888,7 +878,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } } - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); if (err) { pr_err("i915_reset_engine(%s:%s): failed, err=%d\n", engine->name, test_name, err); @@ -900,7 +890,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, if (rq) { if (i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = - drm_info_printer(i915->drm.dev); + drm_info_printer(gt->i915->drm.dev); pr_err("i915_reset_engine(%s:%s):" " failed to complete request after reset\n", @@ -910,7 +900,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, i915_request_put(rq); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; break; } @@ -920,7 +910,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, if (!(flags & TEST_SELF) && !wait_for_idle(engine)) { struct drm_printer p = - drm_info_printer(i915->drm.dev); + drm_info_printer(gt->i915->drm.dev); pr_err("i915_reset_engine(%s:%s):" " failed to idle after reset\n", @@ -932,12 +922,12 @@ static int __igt_reset_engines(struct drm_i915_private *i915, break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); intel_engine_pm_put(engine); pr_info("i915_reset_engine(%s:%s): %lu resets\n", engine->name, test_name, count); - reported = i915_reset_engine_count(&i915->gpu_error, engine); + reported = i915_reset_engine_count(global, engine); reported -= threads[engine->id].resets; if (reported != count) { pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", @@ -947,7 +937,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } unwind: - for_each_engine(other, i915, tmp) { + for_each_engine(other, gt->i915, tmp) { int ret; if (!threads[tmp].task) @@ -962,22 +952,21 @@ unwind: } put_task_struct(threads[tmp].task); - if (other != engine && + if (other->uabi_class != engine->uabi_class && threads[tmp].resets != - i915_reset_engine_count(&i915->gpu_error, other)) { + i915_reset_engine_count(global, other)) { pr_err("Innocent engine %s was reset (count=%ld)\n", other->name, - i915_reset_engine_count(&i915->gpu_error, - other) - + i915_reset_engine_count(global, other) - threads[tmp].resets); if (!err) err = -EINVAL; } } - if (global != i915_reset_count(&i915->gpu_error)) { + if (device != i915_reset_count(global)) { pr_err("Global reset (count=%ld)!\n", - i915_reset_count(&i915->gpu_error) - global); + i915_reset_count(global) - device); if (!err) err = -EINVAL; } @@ -985,20 +974,20 @@ unwind: if (err) break; - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(>->i915->drm.struct_mutex); if (err) break; } - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) err = -EIO; if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); } return err; @@ -1024,13 +1013,13 @@ static int igt_reset_engines(void *arg) }, { } }; - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; typeof(*phases) *p; int 
err; for (p = phases; p->name; p++) { if (p->flags & TEST_PRIORITY) { - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) continue; } @@ -1042,38 +1031,39 @@ static int igt_reset_engines(void *arg) return 0; } -static u32 fake_hangcheck(struct drm_i915_private *i915, - intel_engine_mask_t mask) +static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask) { - u32 count = i915_reset_count(&i915->gpu_error); + u32 count = i915_reset_count(>->i915->gpu_error); - i915_reset(i915, mask, NULL); + intel_gt_reset(gt, mask, NULL); return count; } static int igt_reset_wait(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine = gt->i915->engine[RCS0]; struct i915_request *rq; unsigned int reset_count; struct hang h; long timeout; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS0])) + if (!engine || !intel_engine_can_store_dword(engine)) return 0; /* Check that we detect a stuck waiter and issue a reset */ - igt_global_reset_lock(i915); + igt_global_reset_lock(gt); - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - rq = hang_create_request(&h, i915->engine[RCS0]); + rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto fini; @@ -1083,19 +1073,19 @@ static int igt_reset_wait(void *arg) i915_request_add(rq); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; goto out_rq; } - reset_count = fake_hangcheck(i915, ALL_ENGINES); + reset_count = fake_hangcheck(gt, ALL_ENGINES); timeout = i915_request_wait(rq, 0, 10); if (timeout < 0) { @@ -1105,7 +1095,7 @@ static int igt_reset_wait(void *arg) goto out_rq; } - if (i915_reset_count(&i915->gpu_error) == reset_count) { + if (i915_reset_count(global) == reset_count) { pr_err("No GPU reset recorded!\n"); err = -EINVAL; goto out_rq; @@ -1116,10 +1106,10 @@ out_rq: fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); + mutex_unlock(>->i915->drm.struct_mutex); + igt_global_reset_unlock(gt); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; return err; @@ -1178,11 +1168,12 @@ out_unlock: return err; } -static int __igt_reset_evict_vma(struct drm_i915_private *i915, +static int __igt_reset_evict_vma(struct intel_gt *gt, struct i915_address_space *vm, int (*fn)(void *), unsigned int flags) { + struct intel_engine_cs *engine = gt->i915->engine[RCS0]; struct drm_i915_gem_object *obj; struct task_struct *tsk = NULL; struct i915_request *rq; @@ -1190,17 +1181,17 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, struct hang h; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS0])) + if (!engine || !intel_engine_can_store_dword(engine)) return 0; /* Check that we can recover an unbind stuck on a hanging request */ - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - obj = 
i915_gem_object_create_internal(i915, SZ_1M); + obj = i915_gem_object_create_internal(gt->i915, SZ_1M); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto fini; @@ -1220,7 +1211,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, goto out_obj; } - rq = hang_create_request(&h, i915->engine[RCS0]); + rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_obj; @@ -1258,16 +1249,16 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, if (err) goto out_rq; - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); goto out_reset; } @@ -1284,31 +1275,31 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, wait_for_completion(&arg.completion); if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("igt/evict_vma kthread did not wait\n"); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); goto out_reset; } out_reset: - igt_global_reset_lock(i915); - fake_hangcheck(rq->i915, rq->engine->mask); - igt_global_reset_unlock(i915); + igt_global_reset_lock(gt); + fake_hangcheck(gt, rq->engine->mask); + igt_global_reset_unlock(gt); if (tsk) { - struct igt_wedge_me w; + struct intel_wedge_me w; /* The reset, even indirectly, should take less than 10ms. 
*/ - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) + intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */) err = kthread_stop(tsk); put_task_struct(tsk); } - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); out_rq: i915_request_put(rq); out_obj: @@ -1316,9 +1307,9 @@ out_obj: fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; return err; @@ -1326,26 +1317,26 @@ unlock: static int igt_reset_evict_ggtt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + return __igt_reset_evict_vma(gt, >->ggtt->vm, evict_vma, EXEC_OBJECT_WRITE); } static int igt_reset_evict_ppgtt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_context *ctx; struct drm_file *file; int err; - file = mock_file(i915); + file = mock_file(gt->i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + ctx = live_context(gt->i915, file); + mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; @@ -1353,29 +1344,29 @@ static int igt_reset_evict_ppgtt(void *arg) err = 0; if (ctx->vm) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(i915, ctx->vm, + err = __igt_reset_evict_vma(gt, ctx->vm, evict_vma, EXEC_OBJECT_WRITE); out: - mock_file_free(i915, file); + mock_file_free(gt->i915, file); return err; } static int igt_reset_evict_fence(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + return __igt_reset_evict_vma(gt, >->ggtt->vm, evict_fence, EXEC_OBJECT_NEEDS_FENCE); } -static int wait_for_others(struct drm_i915_private *i915, +static int wait_for_others(struct intel_gt *gt, struct intel_engine_cs *exclude) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { if (engine == exclude) continue; @@ -1388,7 +1379,8 @@ static int wait_for_others(struct drm_i915_private *i915, static int igt_reset_queue(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; struct hang h; @@ -1396,14 +1388,14 @@ static int igt_reset_queue(void *arg) /* Check that we replay pending requests following a hang */ - igt_global_reset_lock(i915); + igt_global_reset_lock(gt); - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { struct i915_request *prev; IGT_TIMEOUT(end_time); unsigned int count; @@ -1444,7 +1436,7 @@ static int igt_reset_queue(void *arg) * (hangcheck), or we focus on resetting just one * engine and so avoid repeatedly resetting innocents. 
*/ - err = wait_for_others(i915, engine); + err = wait_for_others(gt, engine); if (err) { pr_err("%s(%s): Failed to idle other inactive engines after device reset\n", __func__, engine->name); @@ -1452,12 +1444,12 @@ static int igt_reset_queue(void *arg) i915_request_put(prev); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); goto fini; } if (!wait_until_running(&h, prev)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, @@ -1468,13 +1460,13 @@ static int igt_reset_queue(void *arg) i915_request_put(rq); i915_request_put(prev); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; goto fini; } - reset_count = fake_hangcheck(i915, BIT(id)); + reset_count = fake_hangcheck(gt, BIT(id)); if (prev->fence.error != -EIO) { pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", @@ -1494,7 +1486,7 @@ static int igt_reset_queue(void *arg) goto fini; } - if (i915_reset_count(&i915->gpu_error) == reset_count) { + if (i915_reset_count(global) == reset_count) { pr_err("No GPU reset recorded!\n"); i915_request_put(rq); i915_request_put(prev); @@ -1509,11 +1501,11 @@ static int igt_reset_queue(void *arg) pr_info("%s: Completed %d resets\n", engine->name, count); *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(engine->gt); i915_request_put(prev); - err = igt_flush_test(i915, I915_WAIT_LOCKED); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); if (err) break; } @@ -1521,10 +1513,10 @@ static int igt_reset_queue(void *arg) fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); + mutex_unlock(>->i915->drm.struct_mutex); + igt_global_reset_unlock(gt); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; return err; @@ -1532,8 +1524,9 @@ unlock: static int igt_handle_error(void *arg) { - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS0]; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine = gt->i915->engine[RCS0]; struct hang h; struct i915_request *rq; struct i915_gpu_state *error; @@ -1541,15 +1534,15 @@ static int igt_handle_error(void *arg) /* Check that we can issue a global GPU and engine reset */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; if (!engine || !intel_engine_can_store_dword(engine)) return 0; - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); - err = hang_init(&h, i915); + err = hang_init(&h, gt); if (err) goto err_unlock; @@ -1563,28 +1556,28 @@ static int igt_handle_error(void *arg) i915_request_add(rq); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; goto err_request; } - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); /* Temporarily disable error capture */ - error = xchg(&i915->gpu_error.first_error, (void *)-1); + error = xchg(&global->first_error, (void *)-1); - i915_handle_error(i915, engine->mask, 0, NULL); + intel_gt_handle_error(gt, 
engine->mask, 0, NULL); - xchg(&i915->gpu_error.first_error, error); + xchg(&global->first_error, error); - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(&gt->i915->drm.struct_mutex); if (rq->fence.error != -EIO) { pr_err("Guilty request not identified!\n"); @@ -1597,7 +1590,7 @@ err_request: err_fini: hang_fini(&h); err_unlock: - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(&gt->i915->drm.struct_mutex); return err; } @@ -1614,7 +1607,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, tasklet_disable_nosync(t); p->critical_section_begin(); - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); p->critical_section_end(); tasklet_enable(t); @@ -1629,7 +1622,6 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, static int igt_atomic_reset_engine(struct intel_engine_cs *engine, const struct igt_atomic_section *p) { - struct drm_i915_private *i915 = engine->i915; struct i915_request *rq; struct hang h; int err; @@ -1638,7 +1630,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, if (err) return err; - err = hang_init(&h, i915); + err = hang_init(&h, engine->gt); if (err) return err; @@ -1657,16 +1649,16 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, rq->fence.seqno, hws_seqno(&h, rq)); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(engine->gt); err = -EIO; } if (err == 0) { - struct igt_wedge_me w; + struct intel_wedge_me w; - igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/) + intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */) i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(engine->gt)) err = -EIO; } @@ -1678,30 +1670,30 @@ out: static int igt_reset_engines_atomic(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; const typeof(*igt_atomic_phases) *p; int err = 0; /* Check that the engines resets are usable from atomic context */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; - if (USES_GUC_SUBMISSION(i915)) + if (USES_GUC_SUBMISSION(gt->i915)) return 0; - igt_global_reset_lock(i915); - mutex_lock(&i915->drm.struct_mutex); + igt_global_reset_lock(gt); + mutex_lock(&gt->i915->drm.struct_mutex); /* Flush any requests before we get started and check basics */ - if (!igt_force_reset(i915)) + if (!igt_force_reset(gt)) goto unlock; for (p = igt_atomic_phases; p->name; p++) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { err = igt_atomic_reset_engine(engine, p); if (err) goto out; @@ -1710,11 +1702,11 @@ static int igt_reset_engines_atomic(void *arg) out: /* As we poke around the guts, do a full reset before continuing. 
*/ - igt_force_reset(i915); + igt_force_reset(gt); unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); + mutex_unlock(&gt->i915->drm.struct_mutex); + igt_global_reset_unlock(gt); return err; } @@ -1736,28 +1728,29 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_evict_fence), SUBTEST(igt_handle_error), }; + struct intel_gt *gt = &i915->gt; intel_wakeref_t wakeref; bool saved_hangcheck; int err; - if (!intel_has_gpu_reset(i915)) + if (!intel_has_gpu_reset(gt->i915)) return 0; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; /* we're long past hope of a successful reset */ - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); - drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ + drain_delayed_work(&gt->hangcheck.work); /* flush param */ - err = i915_subtests(tests, i915); + err = intel_gt_live_subtests(tests, gt); - mutex_lock(&i915->drm.struct_mutex); - igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(&gt->i915->drm.struct_mutex); + igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(&gt->i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 401e8b539297..60f27e52d267 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -55,7 +55,7 @@ static int live_sanitycheck(void *arg) if (!igt_wait_for_spinner(&spin, rq)) { GEM_TRACE("spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx; } @@ -73,12 +73,231 @@ err_ctx: err_spin: igt_spinner_fini(&spin); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } +static int +emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 10); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_NEQ_SDD; + *cs++ = 0; + *cs++ = i915_ggtt_offset(vma) + 4 * idx; + *cs++ = 0; + + if (idx > 0) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + } else { + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + intel_ring_advance(rq, cs); + return 0; +} + +static struct i915_request * +semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) +{ + struct i915_gem_context *ctx; + struct i915_request *rq; + int err; + + ctx = kernel_context(engine->i915); + if (!ctx) + return ERR_PTR(-ENOMEM); + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) + goto out_ctx; + + err = emit_semaphore_chain(rq, vma, idx); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_ctx: + kernel_context_close(ctx); + return rq; +} + +static int +release_queue(struct intel_engine_cs *engine, + struct i915_vma *vma, + int idx) +{ + struct i915_sched_attr attr = { + .priority = 
I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct i915_request *rq; + u32 *cs; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + + intel_ring_advance(rq, cs); + i915_request_add(rq); + + engine->schedule(rq, &attr); + + return 0; +} + +static int +slice_semaphore_queue(struct intel_engine_cs *outer, + struct i915_vma *vma, + int count) +{ + struct intel_engine_cs *engine; + struct i915_request *head; + enum intel_engine_id id; + int err, i, n = 0; + + head = semaphore_queue(outer, vma, n++); + if (IS_ERR(head)) + return PTR_ERR(head); + + i915_request_get(head); + for_each_engine(engine, outer->i915, id) { + for (i = 0; i < count; i++) { + struct i915_request *rq; + + rq = semaphore_queue(engine, vma, n++); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + } + } + + err = release_queue(outer, vma, n); + if (err) + goto out; + + if (i915_request_wait(head, + I915_WAIT_LOCKED, + 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) { + pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", + count, n); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(outer->gt); + err = -EIO; + } + +out: + i915_request_put(head); + return err; +} + +static int live_timeslice_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + struct i915_vma *vma; + void *vaddr; + int err = 0; + int count; + + /* + * If a request takes too long, we would like to give other users + * a fair go on the GPU. In particular, users may create batches + * that wait upon external input, where that input may even be + * supplied by another GPU job. To avoid blocking forever, we + * need to preempt the current task and replace it with another + * ready task. 
+ */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_unlock; + } + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + for_each_prime_number_from(count, 1, 16) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) { + if (!intel_engine_has_preemption(engine)) + continue; + + memset(vaddr, 0, PAGE_SIZE); + + err = slice_semaphore_queue(engine, vma, count); + if (err) + goto err_pin; + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_pin; + } + } + } + +err_pin: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); +err_unlock: + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + static int live_busywait_preempt(void *arg) { struct drm_i915_private *i915 = arg; @@ -138,6 +357,9 @@ static int live_busywait_preempt(void *arg) struct igt_live_test t; u32 *cs; + if (!intel_engine_has_preemption(engine)) + continue; + if (!intel_engine_can_store_dword(engine)) continue; @@ -229,7 +451,7 @@ static int live_busywait_preempt(void *arg) intel_engine_dump(engine, &p, "%s\n", engine->name); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_vma; } @@ -253,8 +475,6 @@ err_ctx_lo: err_ctx_hi: kernel_context_close(ctx_hi); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -320,7 +540,7 @@ static int live_preempt(void *arg) if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -337,7 +557,7 @@ static int live_preempt(void *arg) if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -361,7 +581,6 @@ err_spin_lo: err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -398,6 +617,9 @@ static int live_late_preempt(void *arg) if (!ctx_lo) goto err_ctx_hi; + /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. 
*/ + ctx_lo->sched.priority = I915_USER_PRIORITY(1); + for_each_engine(engine, i915, id) { struct igt_live_test t; struct i915_request *rq; @@ -465,7 +687,6 @@ err_spin_lo: err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -473,7 +694,7 @@ err_unlock: err_wedged: igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -506,6 +727,114 @@ static void preempt_client_fini(struct preempt_client *c) kernel_context_close(c->ctx); } +static int live_nopreempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct preempt_client a, b; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + + /* + * Verify that we can disable preemption for an individual request + * that may be being observed and not want to be interrupted. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + if (preempt_client_init(i915, &a)) + goto err_unlock; + if (preempt_client_init(i915, &b)) + goto err_client_a; + b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); + + for_each_engine(engine, i915, id) { + struct i915_request *rq_a, *rq_b; + + if (!intel_engine_has_preemption(engine)) + continue; + + engine->execlists.preempt_hang.count = 0; + + rq_a = igt_spinner_create_request(&a.spin, + a.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + goto err_client_b; + } + + /* Low priority client, but unpreemptable! */ + rq_a->flags |= I915_REQUEST_NOPREEMPT; + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + goto err_wedged; + } + + rq_b = igt_spinner_create_request(&b.spin, + b.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + goto err_client_b; + } + + i915_request_add(rq_b); + + /* B is much more important than A! (But A is unpreemptable.) 
*/ + GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a)); + + /* Wait long enough for preemption and timeslicing */ + if (igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client started too early!\n"); + goto err_wedged; + } + + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + goto err_wedged; + } + + igt_spinner_end(&b.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption recorded x%d; should have been suppressed!\n", + engine->execlists.preempt_hang.count); + err = -EINVAL; + goto err_wedged; + } + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); +err_unlock: + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + intel_gt_set_wedged(&i915->gt); + err = -EIO; + goto err_client_b; +} + static int live_suppress_self_preempt(void *arg) { struct drm_i915_private *i915 = arg; @@ -531,6 +860,9 @@ static int live_suppress_self_preempt(void *arg) if (USES_GUC_SUBMISSION(i915)) return 0; /* presume black blox */ + if (intel_vgpu_active(i915)) + return 0; /* GVT forces single port & request submission */ + mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); @@ -604,8 +936,6 @@ err_client_b: err_client_a: preempt_client_fini(&a); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -613,7 +943,7 @@ err_unlock: err_wedged: igt_spinner_end(&b.spin); igt_spinner_end(&a.spin); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_client_b; } @@ -646,6 +976,10 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine) i915_sw_fence_init(&rq->submit, dummy_notify); set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); + spin_lock_init(&rq->lock); + rq->fence.lock = &rq->lock; + INIT_LIST_HEAD(&rq->fence.cb_list); + return rq; } @@ -773,8 +1107,6 @@ err_client_1: err_client_0: preempt_client_fini(&client[0]); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -782,7 +1114,7 @@ err_unlock: err_wedged: for (i = 0; i < ARRAY_SIZE(client); i++) igt_spinner_end(&client[i].spin); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_client_3; } @@ -921,8 +1253,6 @@ err_client_lo: err_client_hi: preempt_client_fini(&hi); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -930,7 +1260,7 @@ err_unlock: err_wedged: igt_spinner_end(&hi.spin); igt_spinner_end(&lo.spin); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_client_lo; } @@ -989,7 +1319,7 @@ static int live_preempt_hang(void *arg) if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -1011,21 +1341,21 @@ static int live_preempt_hang(void *arg) HZ / 10)) { pr_err("Preemption did not occur within timeout!"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + 
intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - i915_reset_engine(engine, NULL); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags); + intel_engine_reset(engine, NULL); + clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags); engine->execlists.preempt_hang.inject_hang = false; if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -1048,7 +1378,6 @@ err_spin_lo: err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -1406,7 +1735,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915, request[nc]->fence.context, request[nc]->fence.seqno); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); break; } } @@ -1553,7 +1882,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915, request[n]->fence.context, request[n]->fence.seqno); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto out; } @@ -1812,9 +2141,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_sanitycheck), + SUBTEST(live_timeslice_preempt), SUBTEST(live_busywait_preempt), SUBTEST(live_preempt), SUBTEST(live_late_preempt), + SUBTEST(live_nopreempt), SUBTEST(live_suppress_self_preempt), SUBTEST(live_suppress_wait_preempt), SUBTEST(live_chain_preempt), @@ -1828,8 +2159,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) if (!HAS_EXECLISTS(i915)) return 0; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_subtests(tests, i915); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 89da9e7cc1ba..00a4f60cdfd5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -9,26 +9,29 @@ static int igt_global_reset(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; unsigned int reset_count; + intel_wakeref_t wakeref; int err = 0; /* Check that we can issue a global GPU reset */ - igt_global_reset_lock(i915); + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm); - reset_count = i915_reset_count(&i915->gpu_error); + reset_count = i915_reset_count(&gt->i915->gpu_error); - i915_reset(i915, ALL_ENGINES, NULL); + intel_gt_reset(gt, ALL_ENGINES, NULL); - if (i915_reset_count(&i915->gpu_error) == reset_count) { + if (i915_reset_count(&gt->i915->gpu_error) == reset_count) { pr_err("No GPU reset recorded!\n"); err = -EINVAL; } - igt_global_reset_unlock(i915); + intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref); + igt_global_reset_unlock(gt); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) err = -EIO; return err; @@ -36,61 +39,123 @@ static int igt_global_reset(void *arg) static int igt_wedged_reset(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; intel_wakeref_t wakeref; /* Check that we can recover a wedged device with a GPU reset */ - igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + igt_global_reset_lock(gt); + 
wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); - GEM_BUG_ON(!i915_reset_failed(i915)); - i915_reset(i915, ALL_ENGINES, NULL); + GEM_BUG_ON(!intel_gt_is_wedged(gt)); + intel_gt_reset(gt, ALL_ENGINES, NULL); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(i915); + intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref); + igt_global_reset_unlock(gt); - return i915_reset_failed(i915) ? -EIO : 0; + return intel_gt_is_wedged(gt) ? -EIO : 0; } static int igt_atomic_reset(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; const typeof(*igt_atomic_phases) *p; int err = 0; /* Check that the resets are usable from atomic context */ - igt_global_reset_lock(i915); - mutex_lock(&i915->drm.struct_mutex); + intel_gt_pm_get(gt); + igt_global_reset_lock(gt); /* Flush any requests before we get started and check basics */ - if (!igt_force_reset(i915)) + if (!igt_force_reset(gt)) goto unlock; for (p = igt_atomic_phases; p->name; p++) { - GEM_TRACE("intel_gpu_reset under %s\n", p->name); + intel_engine_mask_t awake; + + GEM_TRACE("__intel_gt_reset under %s\n", p->name); + awake = reset_prepare(gt); p->critical_section_begin(); - reset_prepare(i915); - err = intel_gpu_reset(i915, ALL_ENGINES); - reset_finish(i915); + + err = __intel_gt_reset(gt, ALL_ENGINES); + p->critical_section_end(); + reset_finish(gt, awake); if (err) { - pr_err("intel_gpu_reset failed under %s\n", p->name); + pr_err("__intel_gt_reset failed under %s\n", p->name); break; } } /* As we poke around the guts, do a full reset before continuing. */ - igt_force_reset(i915); + igt_force_reset(gt); unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); + igt_global_reset_unlock(gt); + intel_gt_pm_put(gt); + + return err; +} + +static int igt_atomic_engine_reset(void *arg) +{ + struct intel_gt *gt = arg; + const typeof(*igt_atomic_phases) *p; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that the resets are usable from atomic context */ + + if (!intel_has_reset_engine(gt->i915)) + return 0; + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + intel_gt_pm_get(gt); + igt_global_reset_lock(gt); + + /* Flush any requests before we get started and check basics */ + if (!igt_force_reset(gt)) + goto out_unlock; + + for_each_engine(engine, gt->i915, id) { + tasklet_disable_nosync(&engine->execlists.tasklet); + intel_engine_pm_get(engine); + + for (p = igt_atomic_phases; p->name; p++) { + GEM_TRACE("intel_engine_reset(%s) under %s\n", + engine->name, p->name); + + p->critical_section_begin(); + err = intel_engine_reset(engine, NULL); + p->critical_section_end(); + + if (err) { + pr_err("intel_engine_reset(%s) failed under %s\n", + engine->name, p->name); + break; + } + } + + intel_engine_pm_put(engine); + tasklet_enable(&engine->execlists.tasklet); + if (err) + break; + } + + /* As we poke around the guts, do a full reset before continuing. 
*/ + igt_force_reset(gt); + +out_unlock: + igt_global_reset_unlock(gt); + intel_gt_pm_put(gt); return err; } @@ -101,18 +166,15 @@ int intel_reset_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_global_reset), /* attempt to recover GPU first */ SUBTEST(igt_wedged_reset), SUBTEST(igt_atomic_reset), + SUBTEST(igt_atomic_engine_reset), }; - intel_wakeref_t wakeref; - int err = 0; + struct intel_gt *gt = &i915->gt; - if (!intel_has_gpu_reset(i915)) + if (!intel_has_gpu_reset(gt->i915)) return 0; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; /* we're long past hope of a successful reset */ - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = i915_subtests(tests, i915); - - return err; + return intel_gt_live_subtests(tests, gt); } diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 76d3977f1d4b..f0a840030382 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -7,15 +7,16 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_pm.h" +#include "intel_gt.h" -#include "i915_random.h" -#include "i915_selftest.h" +#include "../selftests/i915_random.h" +#include "../i915_selftest.h" -#include "igt_flush_test.h" -#include "mock_gem_device.h" -#include "mock_timeline.h" +#include "../selftests/igt_flush_test.h" +#include "../selftests/mock_gem_device.h" +#include "selftests/mock_timeline.h" -static struct page *hwsp_page(struct i915_timeline *tl) +static struct page *hwsp_page(struct intel_timeline *tl) { struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; @@ -23,7 +24,7 @@ static struct page *hwsp_page(struct i915_timeline *tl) return sg_page(obj->mm.pages->sgl); } -static unsigned long hwsp_cacheline(struct i915_timeline *tl) +static unsigned long hwsp_cacheline(struct intel_timeline *tl) { unsigned long address = (unsigned long)page_address(hwsp_page(tl)); @@ -35,7 +36,7 @@ static unsigned long hwsp_cacheline(struct i915_timeline *tl) struct mock_hwsp_freelist { struct drm_i915_private *i915; struct radix_tree_root cachelines; - struct i915_timeline **history; + struct intel_timeline **history; unsigned long count, max; struct rnd_state prng; }; @@ -46,12 +47,12 @@ enum { static void __mock_hwsp_record(struct mock_hwsp_freelist *state, unsigned int idx, - struct i915_timeline *tl) + struct intel_timeline *tl) { tl = xchg(&state->history[idx], tl); if (tl) { radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); - i915_timeline_put(tl); + intel_timeline_put(tl); } } @@ -59,14 +60,14 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, unsigned int count, unsigned int flags) { - struct i915_timeline *tl; + struct intel_timeline *tl; unsigned int idx; while (count--) { unsigned long cacheline; int err; - tl = i915_timeline_create(state->i915, NULL); + tl = intel_timeline_create(&state->i915->gt, NULL); if (IS_ERR(tl)) return PTR_ERR(tl); @@ -77,7 +78,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, pr_err("HWSP cacheline %lu already used; duplicate allocation!\n", cacheline); } - i915_timeline_put(tl); + intel_timeline_put(tl); return err; } @@ -162,21 +163,21 @@ struct __igt_sync { bool set; }; -static int __igt_sync(struct i915_timeline *tl, +static int __igt_sync(struct intel_timeline *tl, u64 ctx, const struct __igt_sync *p, const char *name) { int ret; - if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { + if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != 
p->expected) { pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n", name, p->name, ctx, p->seqno, yesno(p->expected)); return -EINVAL; } if (p->set) { - ret = __i915_timeline_sync_set(tl, ctx, p->seqno); + ret = __intel_timeline_sync_set(tl, ctx, p->seqno); if (ret) return ret; } @@ -204,7 +205,7 @@ static int igt_sync(void *arg) { "unwrap", UINT_MAX, true, false }, {}, }, *p; - struct i915_timeline tl; + struct intel_timeline tl; int order, offset; int ret = -ENODEV; @@ -248,7 +249,7 @@ static unsigned int random_engine(struct rnd_state *rnd) static int bench_sync(void *arg) { struct rnd_state prng; - struct i915_timeline tl; + struct intel_timeline tl; unsigned long end_time, count; u64 prng32_1M; ktime_t kt; @@ -286,7 +287,7 @@ static int bench_sync(void *arg) do { u64 id = i915_prandom_u64_state(&prng); - __i915_timeline_sync_set(&tl, id, 0); + __intel_timeline_sync_set(&tl, id, 0); count++; } while (!time_after(jiffies, end_time)); kt = ktime_sub(ktime_get(), kt); @@ -301,7 +302,7 @@ static int bench_sync(void *arg) while (end_time--) { u64 id = i915_prandom_u64_state(&prng); - if (!__i915_timeline_sync_is_later(&tl, id, 0)) { + if (!__intel_timeline_sync_is_later(&tl, id, 0)) { mock_timeline_fini(&tl); pr_err("Lookup of %llu failed\n", id); return -EINVAL; @@ -322,7 +323,7 @@ static int bench_sync(void *arg) kt = ktime_get(); end_time = jiffies + HZ/10; do { - __i915_timeline_sync_set(&tl, count++, 0); + __intel_timeline_sync_set(&tl, count++, 0); } while (!time_after(jiffies, end_time)); kt = ktime_sub(ktime_get(), kt); pr_info("%s: %lu in-order insertions, %lluns/insert\n", @@ -332,7 +333,7 @@ static int bench_sync(void *arg) end_time = count; kt = ktime_get(); while (end_time--) { - if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) { + if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) { pr_err("Lookup of %lu failed\n", end_time); mock_timeline_fini(&tl); return -EINVAL; @@ -356,8 +357,8 @@ static int bench_sync(void *arg) u32 id = random_engine(&prng); u32 seqno = prandom_u32_state(&prng); - if (!__i915_timeline_sync_is_later(&tl, id, seqno)) - __i915_timeline_sync_set(&tl, id, seqno); + if (!__intel_timeline_sync_is_later(&tl, id, seqno)) + __intel_timeline_sync_set(&tl, id, seqno); count++; } while (!time_after(jiffies, end_time)); @@ -385,8 +386,8 @@ static int bench_sync(void *arg) */ u64 id = (u64)(count & mask) << order; - __i915_timeline_sync_is_later(&tl, id, 0); - __i915_timeline_sync_set(&tl, id, 0); + __intel_timeline_sync_is_later(&tl, id, 0); + __intel_timeline_sync_set(&tl, id, 0); count++; } while (!time_after(jiffies, end_time)); @@ -401,7 +402,7 @@ static int bench_sync(void *arg) return 0; } -int i915_timeline_mock_selftests(void) +int intel_timeline_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(mock_hwsp_freelist), @@ -443,14 +444,14 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) } static struct i915_request * -tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) +tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value) { struct i915_request *rq; int err; - lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */ + lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */ - err = i915_timeline_pin(tl); + err = intel_timeline_pin(tl); if (err) { rq = ERR_PTR(err); goto out; @@ -466,26 +467,26 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) rq = ERR_PTR(err); out_unpin: - 
i915_timeline_unpin(tl); + intel_timeline_unpin(tl); out: if (IS_ERR(rq)) pr_err("Failed to write to timeline!\n"); return rq; } -static struct i915_timeline * -checked_i915_timeline_create(struct drm_i915_private *i915) +static struct intel_timeline * +checked_intel_timeline_create(struct drm_i915_private *i915) { - struct i915_timeline *tl; + struct intel_timeline *tl; - tl = i915_timeline_create(i915, NULL); + tl = intel_timeline_create(&i915->gt, NULL); if (IS_ERR(tl)) return tl; if (*tl->hwsp_seqno != tl->seqno) { pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", *tl->hwsp_seqno, tl->seqno); - i915_timeline_put(tl); + intel_timeline_put(tl); return ERR_PTR(-EINVAL); } @@ -496,7 +497,7 @@ static int live_hwsp_engine(void *arg) { #define NUM_TIMELINES 4096 struct drm_i915_private *i915 = arg; - struct i915_timeline **timelines; + struct intel_timeline **timelines; struct intel_engine_cs *engine; enum intel_engine_id id; intel_wakeref_t wakeref; @@ -523,10 +524,10 @@ static int live_hwsp_engine(void *arg) continue; for (n = 0; n < NUM_TIMELINES; n++) { - struct i915_timeline *tl; + struct intel_timeline *tl; struct i915_request *rq; - tl = checked_i915_timeline_create(i915); + tl = checked_intel_timeline_create(i915); if (IS_ERR(tl)) { err = PTR_ERR(tl); goto out; @@ -534,7 +535,7 @@ static int live_hwsp_engine(void *arg) rq = tl_write(tl, engine, count); if (IS_ERR(rq)) { - i915_timeline_put(tl); + intel_timeline_put(tl); err = PTR_ERR(rq); goto out; } @@ -548,14 +549,14 @@ out: err = -EIO; for (n = 0; n < count; n++) { - struct i915_timeline *tl = timelines[n]; + struct intel_timeline *tl = timelines[n]; if (!err && *tl->hwsp_seqno != n) { pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", n, *tl->hwsp_seqno); err = -EINVAL; } - i915_timeline_put(tl); + intel_timeline_put(tl); } intel_runtime_pm_put(&i915->runtime_pm, wakeref); @@ -571,7 +572,7 @@ static int live_hwsp_alternate(void *arg) { #define NUM_TIMELINES 4096 struct drm_i915_private *i915 = arg; - struct i915_timeline **timelines; + struct intel_timeline **timelines; struct intel_engine_cs *engine; enum intel_engine_id id; intel_wakeref_t wakeref; @@ -596,13 +597,13 @@ static int live_hwsp_alternate(void *arg) count = 0; for (n = 0; n < NUM_TIMELINES; n++) { for_each_engine(engine, i915, id) { - struct i915_timeline *tl; + struct intel_timeline *tl; struct i915_request *rq; if (!intel_engine_can_store_dword(engine)) continue; - tl = checked_i915_timeline_create(i915); + tl = checked_intel_timeline_create(i915); if (IS_ERR(tl)) { err = PTR_ERR(tl); goto out; @@ -610,7 +611,7 @@ static int live_hwsp_alternate(void *arg) rq = tl_write(tl, engine, count); if (IS_ERR(rq)) { - i915_timeline_put(tl); + intel_timeline_put(tl); err = PTR_ERR(rq); goto out; } @@ -624,14 +625,14 @@ out: err = -EIO; for (n = 0; n < count; n++) { - struct i915_timeline *tl = timelines[n]; + struct intel_timeline *tl = timelines[n]; if (!err && *tl->hwsp_seqno != n) { pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", n, *tl->hwsp_seqno); err = -EINVAL; } - i915_timeline_put(tl); + intel_timeline_put(tl); } intel_runtime_pm_put(&i915->runtime_pm, wakeref); @@ -647,7 +648,7 @@ static int live_hwsp_wrap(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - struct i915_timeline *tl; + struct intel_timeline *tl; enum intel_engine_id id; intel_wakeref_t wakeref; int err = 0; @@ -660,7 +661,7 @@ static int live_hwsp_wrap(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = 
intel_runtime_pm_get(&i915->runtime_pm); - tl = i915_timeline_create(i915, NULL); + tl = intel_timeline_create(&i915->gt, NULL); if (IS_ERR(tl)) { err = PTR_ERR(tl); goto out_rpm; @@ -668,7 +669,7 @@ static int live_hwsp_wrap(void *arg) if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) goto out_free; - err = i915_timeline_pin(tl); + err = intel_timeline_pin(tl); if (err) goto out_free; @@ -688,7 +689,7 @@ static int live_hwsp_wrap(void *arg) tl->seqno = -4u; - err = i915_timeline_get_seqno(tl, rq, &seqno[0]); + err = intel_timeline_get_seqno(tl, rq, &seqno[0]); if (err) { i915_request_add(rq); goto out; @@ -703,7 +704,7 @@ static int live_hwsp_wrap(void *arg) } hwsp_seqno[0] = tl->hwsp_seqno; - err = i915_timeline_get_seqno(tl, rq, &seqno[1]); + err = intel_timeline_get_seqno(tl, rq, &seqno[1]); if (err) { i915_request_add(rq); goto out; @@ -745,9 +746,9 @@ out: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - i915_timeline_unpin(tl); + intel_timeline_unpin(tl); out_free: - i915_timeline_put(tl); + intel_timeline_put(tl); out_rpm: intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); @@ -781,10 +782,10 @@ static int live_hwsp_recycle(void *arg) continue; do { - struct i915_timeline *tl; + struct intel_timeline *tl; struct i915_request *rq; - tl = checked_i915_timeline_create(i915); + tl = checked_intel_timeline_create(i915); if (IS_ERR(tl)) { err = PTR_ERR(tl); goto out; @@ -792,14 +793,14 @@ static int live_hwsp_recycle(void *arg) rq = tl_write(tl, engine, count); if (IS_ERR(rq)) { - i915_timeline_put(tl); + intel_timeline_put(tl); err = PTR_ERR(rq); goto out; } if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Wait for timeline writes timed out!\n"); - i915_timeline_put(tl); + intel_timeline_put(tl); err = -EIO; goto out; } @@ -810,26 +811,24 @@ static int live_hwsp_recycle(void *arg) err = -EINVAL; } - i915_timeline_put(tl); + intel_timeline_put(tl); count++; if (err) goto out; - i915_timelines_park(i915); /* Encourage recycling! */ + intel_timelines_park(i915); /* Encourage recycling! 
*/ } while (!__igt_timeout(end_time, NULL)); } out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } -int i915_timeline_live_selftests(struct drm_i915_private *i915) +int intel_timeline_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_hwsp_recycle), @@ -838,8 +837,8 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915) SUBTEST(live_hwsp_wrap), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_subtests(tests, i915); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 9eaf030affd0..ab147985fa74 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -5,13 +5,13 @@ */ #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" #include "intel_reset.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_reset.h" #include "selftests/igt_spinner.h" -#include "selftests/igt_wedge_me.h" #include "selftests/mock_drm.h" #include "gem/selftests/igt_gem_utils.h" @@ -24,11 +24,9 @@ static const struct wo_register { { INTEL_GEMINILAKE, 0x731c } }; -#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8) struct wa_lists { struct i915_wa_list gt_wa_list; struct { - char name[REF_NAME_MAX]; struct i915_wa_list wa_list; struct i915_wa_list ctx_wa_list; } engine[I915_NUM_ENGINES]; @@ -42,25 +40,20 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) memset(lists, 0, sizeof(*lists)); - wa_init_start(&lists->gt_wa_list, "GT_REF"); + wa_init_start(&lists->gt_wa_list, "GT_REF", "global"); gt_init_workarounds(i915, &lists->gt_wa_list); wa_init_finish(&lists->gt_wa_list); for_each_engine(engine, i915, id) { struct i915_wa_list *wal = &lists->engine[id].wa_list; - char *name = lists->engine[id].name; - snprintf(name, REF_NAME_MAX, "%s_REF", engine->name); - - wa_init_start(wal, name); + wa_init_start(wal, "REF", engine->name); engine_init_workarounds(engine, wal); wa_init_finish(wal); - snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name); - __intel_engine_init_ctx_wa(engine, &lists->engine[id].ctx_wa_list, - name); + "CTX_REF"); } } @@ -102,7 +95,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) i915_gem_object_flush_map(result); i915_gem_object_unpin_map(result); - vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -184,7 +177,7 @@ static int check_whitelist(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { struct drm_i915_gem_object *results; - struct igt_wedge_me wedge; + struct intel_wedge_me wedge; u32 *vaddr; int err; int i; @@ -195,10 +188,10 @@ static int check_whitelist(struct i915_gem_context *ctx, err = 0; i915_gem_object_lock(results); - igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ + intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! 
*/ err = i915_gem_object_set_to_cpu_domain(results, false); i915_gem_object_unlock(results); - if (i915_terminally_wedged(ctx->i915)) + if (intel_gt_is_wedged(&ctx->i915->gt)) err = -EIO; if (err) goto out_put; @@ -231,13 +224,13 @@ out_put: static int do_device_reset(struct intel_engine_cs *engine) { - i915_reset(engine->i915, engine->mask, "live_workarounds"); + intel_gt_reset(engine->gt, engine->mask, "live_workarounds"); return 0; } static int do_engine_reset(struct intel_engine_cs *engine) { - return i915_reset_engine(engine, "live_workarounds"); + return intel_engine_reset(engine, "live_workarounds"); } static int @@ -286,64 +279,67 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, const char *name) { struct drm_i915_private *i915 = engine->i915; - struct i915_gem_context *ctx; + struct i915_gem_context *ctx, *tmp; struct igt_spinner spin; intel_wakeref_t wakeref; int err; - pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", - engine->whitelist.count, name); - - err = igt_spinner_init(&spin, i915); - if (err) - return err; + pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n", + engine->whitelist.count, engine->name, name); ctx = kernel_context(i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); + err = igt_spinner_init(&spin, i915); + if (err) + goto out_ctx; + err = check_whitelist(ctx, engine); if (err) { pr_err("Invalid whitelist *before* %s reset!\n", name); - goto out; + goto out_spin; } err = switch_to_scratch_context(engine, &spin); if (err) - goto out; + goto out_spin; with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = reset(engine); igt_spinner_end(&spin); - igt_spinner_fini(&spin); if (err) { pr_err("%s reset failed\n", name); - goto out; + goto out_spin; } err = check_whitelist(ctx, engine); if (err) { pr_err("Whitelist not preserved in context across %s reset!\n", name); - goto out; + goto out_spin; } + tmp = kernel_context(i915); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto out_spin; + } kernel_context_close(ctx); - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + ctx = tmp; err = check_whitelist(ctx, engine); if (err) { pr_err("Invalid whitelist *after* %s reset in fresh context!\n", name); - goto out; + goto out_spin; } -out: +out_spin: + igt_spinner_fini(&spin); +out_ctx: kernel_context_close(ctx); return err; } @@ -393,6 +389,10 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg) enum intel_platform platform = INTEL_INFO(engine->i915)->platform; int i; + if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_WR) + return true; + for (i = 0; i < ARRAY_SIZE(wo_registers); i++) { if (wo_registers[i].platform == platform && wo_registers[i].reg == reg) @@ -404,7 +404,8 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg) static bool ro_register(u32 reg) { - if (reg & RING_FORCE_TO_NONPRIV_RD) + if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_RD) return true; return false; @@ -476,12 +477,12 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, u32 srm, lrm, rsvd; u32 expect; int idx; + bool ro_reg; if (wo_register(engine, reg)) continue; - if (ro_register(reg)) - continue; + ro_reg = ro_register(reg); srm = MI_STORE_REGISTER_MEM; lrm = MI_LOAD_REGISTER_MEM; @@ -542,7 +543,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, i915_gem_object_flush_map(batch->obj); i915_gem_object_unpin_map(batch->obj); - i915_gem_chipset_flush(ctx->i915); + 
intel_gt_chipset_flush(engine->gt); rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { @@ -570,7 +571,7 @@ err_request: if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("%s: Futzing %x timedout; cancelling test\n", engine->name, reg); - i915_gem_set_wedged(ctx->i915); + intel_gt_set_wedged(&ctx->i915->gt); err = -EIO; goto out_batch; } @@ -582,24 +583,35 @@ err_request: } GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); - rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */ - if (!rsvd) { - pr_err("%s: Unable to write to whitelisted register %x\n", - engine->name, reg); - err = -EINVAL; - goto out_unpin; + if (!ro_reg) { + /* detect write masking */ + rsvd = results[ARRAY_SIZE(values)]; + if (!rsvd) { + pr_err("%s: Unable to write to whitelisted register %x\n", + engine->name, reg); + err = -EINVAL; + goto out_unpin; + } } expect = results[0]; idx = 1; for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, values[v], rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, values[v], rsvd); + if (results[idx] != expect) err++; idx++; } for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, ~values[v], rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, ~values[v], rsvd); + if (results[idx] != expect) err++; idx++; @@ -608,15 +620,22 @@ err_request: pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", engine->name, err, reg); - pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", - engine->name, reg, results[0], rsvd); + if (ro_reg) + pr_info("%s: Whitelisted read-only register: %x, original value %08x\n", + engine->name, reg, results[0]); + else + pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", + engine->name, reg, results[0], rsvd); expect = results[0]; idx = 1; for (v = 0; v < ARRAY_SIZE(values); v++) { u32 w = values[v]; - expect = reg_write(expect, w, rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, w, rsvd); pr_info("Wrote %08x, read %08x, expect %08x\n", w, results[idx], expect); idx++; @@ -624,7 +643,10 @@ err_request: for (v = 0; v < ARRAY_SIZE(values); v++) { u32 w = ~values[v]; - expect = reg_write(expect, w, rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, w, rsvd); pr_info("Wrote %08x, read %08x, expect %08x\n", w, results[idx], expect); idx++; @@ -707,7 +729,7 @@ static int live_reset_whitelist(void *arg) if (!engine || engine->whitelist.count == 0) return 0; - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); if (intel_has_reset_engine(i915)) { err = check_whitelist_across_reset(engine, @@ -726,7 +748,7 @@ static int live_reset_whitelist(void *arg) } out: - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); return err; } @@ -756,8 +778,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx, u64 offset = results->node.start + sizeof(u32) * i; u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - /* Clear RD only and WR only flags */ - reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR); + /* Clear access permission field */ + reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK; *cs++ = srm; *cs++ = reg; @@ -806,7 +828,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, *cs++ = MI_BATCH_BUFFER_END; i915_gem_object_flush_map(batch->obj); - i915_gem_chipset_flush(ctx->i915); + intel_gt_chipset_flush(engine->gt); rq = 
igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { @@ -925,7 +947,13 @@ check_whitelisted_registers(struct intel_engine_cs *engine, err = 0; for (i = 0; i < engine->whitelist.count; i++) { - if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg)) + const struct i915_wa *wa = &engine->whitelist.list[i]; + + if (i915_mmio_reg_offset(wa->reg) & + RING_FORCE_TO_NONPRIV_ACCESS_RD) + continue; + + if (!fn(engine, a[i], b[i], wa->reg)) err = -EINVAL; } @@ -1089,7 +1117,7 @@ live_gpu_reset_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1098,7 +1126,7 @@ live_gpu_reset_workarounds(void *arg) if (!ok) goto out; - i915_reset(i915, ALL_ENGINES, "live_workarounds"); + intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds"); ok = verify_wa_lists(ctx, &lists, "after reset"); @@ -1106,7 +1134,7 @@ out: kernel_context_close(ctx); reference_lists_fini(i915, &lists); intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); return ok ? 0 : -ESRCH; } @@ -1131,7 +1159,7 @@ live_engine_reset_workarounds(void *arg) if (IS_ERR(ctx)) return PTR_ERR(ctx); - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1147,7 +1175,7 @@ live_engine_reset_workarounds(void *arg) goto err; } - i915_reset_engine(engine, "live_workarounds"); + intel_engine_reset(engine, "live_workarounds"); ok = verify_wa_lists(ctx, &lists, "after idle reset"); if (!ok) { @@ -1175,7 +1203,7 @@ live_engine_reset_workarounds(void *arg) goto err; } - i915_reset_engine(engine, "live_workarounds"); + intel_engine_reset(engine, "live_workarounds"); igt_spinner_end(&spin); igt_spinner_fini(&spin); @@ -1190,7 +1218,7 @@ live_engine_reset_workarounds(void *arg) err: reference_lists_fini(i915, &lists); intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); kernel_context_close(ctx); igt_flush_test(i915, I915_WAIT_LOCKED); @@ -1209,7 +1237,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) }; int err; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c index 65b52be23d42..5c549205828a 100644 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c @@ -4,13 +4,13 @@ * Copyright © 2017-2018 Intel Corporation */ -#include "../i915_timeline.h" +#include "../intel_timeline.h" #include "mock_timeline.h" -void mock_timeline_init(struct i915_timeline *timeline, u64 context) +void mock_timeline_init(struct intel_timeline *timeline, u64 context) { - timeline->i915 = NULL; + timeline->gt = NULL; timeline->fence_context = context; mutex_init(&timeline->mutex); @@ -23,7 +23,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) INIT_LIST_HEAD(&timeline->link); } -void mock_timeline_fini(struct i915_timeline *timeline) +void mock_timeline_fini(struct intel_timeline *timeline) { i915_syncmap_free(&timeline->sync); } diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h index b6deaa61110d..689efc66c908 100644 --- 
a/drivers/gpu/drm/i915/selftests/mock_timeline.h +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h @@ -7,9 +7,9 @@ #ifndef __MOCK_TIMELINE__ #define __MOCK_TIMELINE__ -struct i915_timeline; +struct intel_timeline; -void mock_timeline_init(struct i915_timeline *timeline, u64 context); -void mock_timeline_fini(struct i915_timeline *timeline); +void mock_timeline_init(struct intel_timeline *timeline, u64 context); +void mock_timeline_fini(struct intel_timeline *timeline); #endif /* !__MOCK_TIMELINE__ */ diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile new file mode 100644 index 000000000000..bec94d434cb6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/Makefile @@ -0,0 +1,5 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/../.. + +# Extra header tests +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index c40a6efdd33a..13fbbffd05c7 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -22,6 +22,7 @@ * */ +#include "gt/intel_gt.h" #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" @@ -29,16 +30,16 @@ static void gen8_guc_raise_irq(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); + intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); } static void gen11_guc_raise_irq(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0); + intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0); } static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) @@ -52,11 +53,11 @@ static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) void intel_guc_init_send_regs(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); enum forcewake_domains fw_domains = 0; unsigned int i; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(gt->i915) >= 11) { guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; @@ -67,7 +68,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) } for (i = 0; i < guc->send_regs.count; i++) { - fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, + fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore, guc_send_reg(guc, i), FW_REG_READ | FW_REG_WRITE); } @@ -76,7 +77,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) void intel_guc_init_early(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; intel_guc_fw_init_early(guc); intel_guc_ct_init_early(&guc->ct); @@ -99,90 +100,6 @@ void intel_guc_init_early(struct intel_guc *guc) } } -static int guc_init_wq(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - /* - * GuC log buffer flush work item has to do register access to - * send the ack to GuC and this work item, if not synced before - * suspend, can potentially get executed after the GFX device is - * suspended. 
- * By marking the WQ as freezable, we don't have to bother about - * flushing of this work item from the suspend hooks, the pending - * work item if any will be either executed before the suspend - * or scheduled later on resume. This way the handling of work - * item can be kept same between system suspend & rpm suspend. - */ - guc->log.relay.flush_wq = - alloc_ordered_workqueue("i915-guc_log", - WQ_HIGHPRI | WQ_FREEZABLE); - if (!guc->log.relay.flush_wq) { - DRM_ERROR("Couldn't allocate workqueue for GuC log\n"); - return -ENOMEM; - } - - /* - * Even though both sending GuC action, and adding a new workitem to - * GuC workqueue are serialized (each with its own locking), since - * we're using mutliple engines, it's possible that we're going to - * issue a preempt request with two (or more - each for different - * engine) workitems in GuC queue. In this situation, GuC may submit - * all of them, which will make us very confused. - * Our preemption contexts may even already be complete - before we - * even had the chance to sent the preempt action to GuC!. Rather - * than introducing yet another lock, we can just use ordered workqueue - * to make sure we're always sending a single preemption request with a - * single workitem. - */ - if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) && - USES_GUC_SUBMISSION(dev_priv)) { - guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt", - WQ_HIGHPRI); - if (!guc->preempt_wq) { - destroy_workqueue(guc->log.relay.flush_wq); - DRM_ERROR("Couldn't allocate workqueue for GuC " - "preemption\n"); - return -ENOMEM; - } - } - - return 0; -} - -static void guc_fini_wq(struct intel_guc *guc) -{ - struct workqueue_struct *wq; - - wq = fetch_and_zero(&guc->preempt_wq); - if (wq) - destroy_workqueue(wq); - - wq = fetch_and_zero(&guc->log.relay.flush_wq); - if (wq) - destroy_workqueue(wq); -} - -int intel_guc_init_misc(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - int ret; - - ret = guc_init_wq(guc); - if (ret) - return ret; - - intel_uc_fw_fetch(i915, &guc->fw); - - return 0; -} - -void intel_guc_fini_misc(struct intel_guc *guc) -{ - intel_uc_fw_cleanup_fetch(&guc->fw); - guc_fini_wq(guc); -} - static int guc_shared_data_create(struct intel_guc *guc) { struct i915_vma *vma; @@ -209,66 +126,6 @@ static void guc_shared_data_destroy(struct intel_guc *guc) i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); } -int intel_guc_init(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - int ret; - - ret = intel_uc_fw_init(&guc->fw); - if (ret) - goto err_fetch; - - ret = guc_shared_data_create(guc); - if (ret) - goto err_fw; - GEM_BUG_ON(!guc->shared_data); - - ret = intel_guc_log_create(&guc->log); - if (ret) - goto err_shared; - - ret = intel_guc_ads_create(guc); - if (ret) - goto err_log; - GEM_BUG_ON(!guc->ads_vma); - - ret = intel_guc_ct_init(&guc->ct); - if (ret) - goto err_ads; - - /* We need to notify the guc whenever we change the GGTT */ - i915_ggtt_enable_guc(dev_priv); - - return 0; - -err_ads: - intel_guc_ads_destroy(guc); -err_log: - intel_guc_log_destroy(&guc->log); -err_shared: - guc_shared_data_destroy(guc); -err_fw: - intel_uc_fw_fini(&guc->fw); -err_fetch: - intel_uc_fw_cleanup_fetch(&guc->fw); - return ret; -} - -void intel_guc_fini(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - i915_ggtt_disable_guc(dev_priv); - - intel_guc_ct_fini(&guc->ct); - - intel_guc_ads_destroy(guc); - intel_guc_log_destroy(&guc->log); - 
guc_shared_data_destroy(guc); - intel_uc_fw_fini(&guc->fw); - intel_uc_fw_cleanup_fetch(&guc->fw); -} - static u32 guc_ctl_debug_flags(struct intel_guc *guc) { u32 level = intel_guc_log_get_level(&guc->log); @@ -287,7 +144,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) { u32 flags = 0; - if (!USES_GUC_SUBMISSION(guc_to_i915(guc))) + if (!intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) flags |= GUC_CTL_DISABLE_SCHEDULER; return flags; @@ -297,7 +154,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) { u32 flags = 0; - if (USES_GUC_SUBMISSION(guc_to_i915(guc))) { + if (intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) { u32 ctxnum, base; base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); @@ -364,13 +221,12 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc) * transfer. These parameters are read by the firmware on startup * and cannot be changed thereafter. */ -void intel_guc_init_params(struct intel_guc *guc) +static void guc_init_params(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 params[GUC_CTL_MAX_DWORDS]; + u32 *params = guc->params; int i; - memset(params, 0, sizeof(params)); + BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); @@ -380,20 +236,109 @@ void intel_guc_init_params(struct intel_guc *guc) for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); +} + +/* + * Initialise the GuC parameter block before starting the firmware + * transfer. These parameters are read by the firmware on startup + * and cannot be changed thereafter. + */ +void intel_guc_write_params(struct intel_guc *guc) +{ + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; + int i; /* * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and * they are power context saved so it's ok to release forcewake * when we are done here and take it again at xfer time. 
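
Note on the parameter handling above: the old intel_guc_init_params() computed the control block on the stack and wrote it to hardware in one pass; it is now split into guc_init_params(), which fills the cached guc->params[] once from intel_guc_init(), and intel_guc_write_params(), which replays that cache into the SOFT_SCRATCH registers before each firmware transfer (per the comment, the firmware reads them at startup). A minimal sketch of the intended ordering; example_guc_load() is a hypothetical wrapper, not a function added by this patch, and the exact call sites live outside these hunks:

	static int example_guc_load(struct intel_guc *guc)
	{
		int err;

		/* one-time object setup; ends by caching guc->params[] */
		err = intel_guc_init(guc);
		if (err)
			return err;

		/* before every firmware transfer: replay params, then upload */
		intel_guc_write_params(guc);
		return intel_guc_fw_upload(guc);
	}
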
*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER); + intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER); - I915_WRITE(SOFT_SCRATCH(0), 0); + intel_uncore_write(uncore, SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) - I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); + intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER); +} - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER); +int intel_guc_init(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + int ret; + + ret = intel_uc_fw_init(&guc->fw); + if (ret) + goto err_fetch; + + ret = guc_shared_data_create(guc); + if (ret) + goto err_fw; + GEM_BUG_ON(!guc->shared_data); + + ret = intel_guc_log_create(&guc->log); + if (ret) + goto err_shared; + + ret = intel_guc_ads_create(guc); + if (ret) + goto err_log; + GEM_BUG_ON(!guc->ads_vma); + + ret = intel_guc_ct_init(&guc->ct); + if (ret) + goto err_ads; + + if (intel_uc_is_using_guc_submission(>->uc)) { + /* + * This is stuff we need to have available at fw load time + * if we are planning to enable submission later + */ + ret = intel_guc_submission_init(guc); + if (ret) + goto err_ct; + } + + /* now that everything is perma-pinned, initialize the parameters */ + guc_init_params(guc); + + /* We need to notify the guc whenever we change the GGTT */ + i915_ggtt_enable_guc(gt->ggtt); + + return 0; + +err_ct: + intel_guc_ct_fini(&guc->ct); +err_ads: + intel_guc_ads_destroy(guc); +err_log: + intel_guc_log_destroy(&guc->log); +err_shared: + guc_shared_data_destroy(guc); +err_fw: + intel_uc_fw_fini(&guc->fw); +err_fetch: + intel_uc_fw_cleanup_fetch(&guc->fw); + return ret; +} + +void intel_guc_fini(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + i915_ggtt_disable_guc(gt->ggtt); + + if (intel_uc_is_using_guc_submission(>->uc)) + intel_guc_submission_fini(guc); + + intel_guc_ct_fini(&guc->ct); + + intel_guc_ads_destroy(guc); + intel_guc_log_destroy(&guc->log); + guc_shared_data_destroy(guc); + intel_uc_fw_fini(&guc->fw); + intel_uc_fw_cleanup_fetch(&guc->fw); } int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, @@ -414,8 +359,7 @@ void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; u32 status; int i; int ret; @@ -464,7 +408,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, int count = min(response_buf_size, guc->send_regs.count - 1); for (i = 0; i < count; i++) - response_buf[i] = I915_READ(guc_send_reg(guc, i + 1)); + response_buf[i] = intel_uncore_read(uncore, + guc_send_reg(guc, i + 1)); } /* Use data from the GuC response as our return value */ @@ -497,7 +442,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, int intel_guc_sample_forcewake(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; u32 action[2]; action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; @@ -538,7 +483,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) */ int intel_guc_suspend(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; int ret; u32 
status; u32 action[] = { @@ -556,13 +501,14 @@ int intel_guc_suspend(struct intel_guc *guc) * in progress so we need to take care of that ourselves as well. */ - I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); + intel_uncore_write(uncore, SOFT_SCRATCH(14), + INTEL_GUC_SLEEP_STATE_INVALID_MASK); ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); if (ret) return ret; - ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14), + ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK, 0, 0, 10, &status); if (ret) @@ -658,17 +604,17 @@ int intel_guc_resume(struct intel_guc *guc) */ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); struct drm_i915_gem_object *obj; struct i915_vma *vma; u64 flags; int ret; - obj = i915_gem_object_create_shmem(dev_priv, size); + obj = i915_gem_object_create_shmem(gt->i915, size); if (IS_ERR(obj)) return ERR_CAST(obj); - vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL); + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); if (IS_ERR(vma)) goto err; diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 08c906abdfa2..714e9892aaff 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -35,10 +35,7 @@ #include "i915_utils.h" #include "i915_vma.h" -struct guc_preempt_work { - struct work_struct work; - struct intel_engine_cs *engine; -}; +struct __guc_ads_blob; /* * Top level structure of GuC. It handles firmware loading and manages client @@ -59,12 +56,14 @@ struct intel_guc { struct { bool enabled; - void (*reset)(struct drm_i915_private *i915); - void (*enable)(struct drm_i915_private *i915); - void (*disable)(struct drm_i915_private *i915); + void (*reset)(struct intel_guc *guc); + void (*enable)(struct intel_guc *guc); + void (*disable)(struct intel_guc *guc); } interrupts; struct i915_vma *ads_vma; + struct __guc_ads_blob *ads_blob; + struct i915_vma *stage_desc_pool; void *stage_desc_pool_vaddr; struct ida stage_ids; @@ -72,15 +71,14 @@ struct intel_guc { void *shared_data_vaddr; struct intel_guc_client *execbuf_client; - struct intel_guc_client *preempt_client; - - struct guc_preempt_work preempt_work[I915_NUM_ENGINES]; - struct workqueue_struct *preempt_wq; DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS); /* Cyclic counter mod pagesize */ u32 db_cacheline; + /* Control params for fw initialization */ + u32 params[GUC_CTL_MAX_DWORDS]; + /* GuC's FW specific registers used in MMIO send */ struct { u32 base; @@ -88,6 +86,9 @@ struct intel_guc { enum forcewake_domains fw_domains; } send_regs; + /* Store msg (e.g. 
log flush) that we see while CTBs are disabled */ + u32 mmio_msg; + /* To serialize the intel_guc_send actions */ struct mutex send_mutex; @@ -154,11 +155,9 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); -void intel_guc_init_params(struct intel_guc *guc); -int intel_guc_init_misc(struct intel_guc *guc); +void intel_guc_write_params(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); void intel_guc_fini(struct intel_guc *guc); -void intel_guc_fini_misc(struct intel_guc *guc); int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, @@ -173,14 +172,16 @@ int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); -static inline bool intel_guc_is_loaded(struct intel_guc *guc) +static inline bool intel_guc_is_running(struct intel_guc *guc) { - return intel_uc_fw_is_loaded(&guc->fw); + return intel_uc_fw_is_running(&guc->fw); } static inline int intel_guc_sanitize(struct intel_guc *guc) { intel_uc_fw_sanitize(&guc->fw); + guc->mmio_msg = 0; + return 0; } diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index ecb69fc94218..a0da80241f22 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -22,6 +22,7 @@ * */ +#include "gt/intel_gt.h" #include "intel_guc_ads.h" #include "intel_uc.h" #include "i915_drv.h" @@ -83,18 +84,14 @@ struct __guc_ads_blob { u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; } __packed; -static int __guc_ads_init(struct intel_guc *guc) +static void __guc_ads_init(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct __guc_ads_blob *blob; + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + struct __guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; u8 engine_class; - blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB); - if (IS_ERR(blob)) - return PTR_ERR(blob); - /* GuC scheduling policies */ guc_policies_init(&blob->policies); @@ -144,9 +141,7 @@ static int __guc_ads_init(struct intel_guc *guc) blob->ads.gt_system_info = base + ptr_offset(blob, system_info); blob->ads.clients_info = base + ptr_offset(blob, clients_info); - i915_gem_object_unpin_map(guc->ads_vma->obj); - - return 0; + i915_gem_object_flush_map(guc->ads_vma->obj); } /** @@ -160,6 +155,7 @@ int intel_guc_ads_create(struct intel_guc *guc) { const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); struct i915_vma *vma; + void *blob; int ret; GEM_BUG_ON(guc->ads_vma); @@ -168,11 +164,16 @@ int intel_guc_ads_create(struct intel_guc *guc) if (IS_ERR(vma)) return PTR_ERR(vma); + blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto err_vma; + } + guc->ads_vma = vma; + guc->ads_blob = blob; - ret = __guc_ads_init(guc); - if (ret) - goto err_vma; + __guc_ads_init(guc); return 0; @@ -183,7 +184,7 @@ err_vma: void intel_guc_ads_destroy(struct intel_guc *guc) { - i915_vma_unpin_and_release(&guc->ads_vma, 0); + i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP); } /** diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h 
index 7f40f9cd5fb9..7f40f9cd5fb9 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 3921809f812b..9e383a47609f 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -529,8 +529,8 @@ unlink: /* * Command Transport (CT) buffer based GuC send function. */ -static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size) +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) { struct intel_guc_ct *ct = &guc->ct; struct intel_guc_ct_channel *ctch = &ct->host_channel; @@ -834,7 +834,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct) * When we're communicating with the GuC over CT, GuC uses events * to notify us about new messages being posted on the RECV buffer. */ -static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) { struct intel_guc_ct *ct = &guc->ct; @@ -892,20 +892,11 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); struct intel_guc_ct_channel *ctch = &ct->host_channel; - int err; if (ctch->enabled) return 0; - err = ctch_enable(guc, ctch); - if (unlikely(err)) - return err; - - /* Switch into cmd transport buffer based send() */ - guc->send = intel_guc_send_ct; - guc->handler = intel_guc_to_host_event_handler_ct; - DRM_INFO("CT: %s\n", enableddisabled(true)); - return 0; + return ctch_enable(guc, ctch); } /** @@ -921,9 +912,4 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct) return; ctch_disable(guc, ctch); - - /* Disable send */ - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; - DRM_INFO("CT: %s\n", enableddisabled(false)); } diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h index 41ba593a4df7..8c1f6d133168 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h @@ -24,11 +24,14 @@ #ifndef _INTEL_GUC_CT_H_ #define _INTEL_GUC_CT_H_ -struct intel_guc; -struct i915_vma; +#include <linux/spinlock.h> +#include <linux/workqueue.h> #include "intel_guc_fwif.h" +struct i915_vma; +struct intel_guc; + /** * DOC: Command Transport (CT). 
* @@ -101,4 +104,8 @@ static inline void intel_guc_ct_stop(struct intel_guc_ct *ct) ct->host_channel.enabled = false; } +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc); + #endif /* _INTEL_GUC_CT_H_ */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c new file mode 100644 index 000000000000..28735c14b9a0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -0,0 +1,181 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Vinit Azad <vinit.azad@intel.com> + * Ben Widawsky <ben@bwidawsk.net> + * Dave Gordon <david.s.gordon@intel.com> + * Alex Dai <yu.dai@intel.com> + */ + +#include "gt/intel_gt.h" +#include "intel_guc_fw.h" +#include "i915_drv.h" + +/** + * intel_guc_fw_init_early() - initializes GuC firmware struct + * @guc: intel_guc struct + * + * On platforms with GuC selects firmware for uploading + */ +void intel_guc_fw_init_early(struct intel_guc *guc) +{ + intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, guc_to_gt(guc)->i915); +} + +static void guc_prepare_xfer(struct intel_uncore *uncore) +{ + u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES | + GUC_ENABLE_READ_CACHE_LOGIC | + GUC_ENABLE_MIA_CACHING | + GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | + GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | + GUC_ENABLE_MIA_CLOCK_GATING; + + /* Must program this register before loading the ucode with DMA */ + intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags); + + if (IS_GEN9_LP(uncore->i915)) + intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + else + intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + + if (IS_GEN(uncore->i915, 9)) { + /* DOP Clock Gating Enable for GuC clocks */ + intel_uncore_rmw(uncore, GEN7_MISCCPCTL, + 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE); + + /* allows for 5us (in 10ns units) before GT can go to RC6 */ + intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF); + } +} + +/* Copy RSA signature from the fw image to HW for verification */ +static void guc_xfer_rsa(struct intel_uc_fw *guc_fw, + struct intel_uncore *uncore) +{ + u32 rsa[UOS_RSA_SCRATCH_COUNT]; + size_t copied; + int i; + + copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa)); + GEM_BUG_ON(copied < sizeof(rsa)); + + for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) + 
intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]); +} + +/* + * Read the GuC status register (GUC_STATUS) and store it in the + * specified location; then return a boolean indicating whether + * the value matches either of two values representing completion + * of the GuC boot process. + * + * This is used for polling the GuC status in a wait_for() + * loop below. + */ +static inline bool guc_ready(struct intel_uncore *uncore, u32 *status) +{ + u32 val = intel_uncore_read(uncore, GUC_STATUS); + u32 uk_val = val & GS_UKERNEL_MASK; + + *status = val; + return (uk_val == GS_UKERNEL_READY) || + ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE)); +} + +static int guc_wait_ucode(struct intel_uncore *uncore) +{ + u32 status; + int ret; + + /* + * Wait for the GuC to start up. + * NB: Docs recommend not using the interrupt for completion. + * Measurements indicate this should take no more than 20ms, so a + * timeout here indicates that the GuC has failed and is unusable. + * (Higher levels of the driver may decide to reset the GuC and + * attempt the ucode load again if this happens.) + */ + ret = wait_for(guc_ready(uncore, &status), 100); + DRM_DEBUG_DRIVER("GuC status %#x\n", status); + + if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { + DRM_ERROR("GuC firmware signature verification failed\n"); + ret = -ENOEXEC; + } + + if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { + DRM_ERROR("GuC firmware exception. EIP: %#x\n", + intel_uncore_read(uncore, SOFT_SCRATCH(13))); + ret = -ENXIO; + } + + return ret; +} + +/** + * intel_guc_fw_upload() - load GuC uCode to device + * @guc: intel_guc structure + * + * Called from intel_uc_init_hw() during driver load, resume from sleep and + * after a GPU reset. + * + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. + * + * Return: non-zero code on error + */ +int intel_guc_fw_upload(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_uncore *uncore = gt->uncore; + int ret; + + guc_prepare_xfer(uncore); + + /* + * Note that GuC needs the CSS header plus uKernel code to be copied + * by the DMA engine in one operation, whereas the RSA signature is + * loaded via MMIO. + */ + guc_xfer_rsa(&guc->fw, uncore); + + /* + * Current uCode expects the code to be loaded at 8k; locations below + * this are used for the stack. 
+ */ + ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE); + if (ret) + goto out; + + ret = guc_wait_ucode(uncore); + if (ret) + goto out; + + guc->fw.status = INTEL_UC_FIRMWARE_RUNNING; + return 0; + +out: + guc->fw.status = INTEL_UC_FIRMWARE_FAIL; + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h index 4ec5d3d9e2b0..4ec5d3d9e2b0 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index f55f3bc8524d..06a9bdfb0faf 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -23,6 +23,10 @@ #ifndef _INTEL_GUC_FWIF_H #define _INTEL_GUC_FWIF_H +#include <linux/bits.h> +#include <linux/compiler.h> +#include <linux/types.h> + #define GUC_CLIENT_PRIORITY_KMD_HIGH 0 #define GUC_CLIENT_PRIORITY_HIGH 1 #define GUC_CLIENT_PRIORITY_KMD_NORMAL 2 @@ -39,13 +43,8 @@ #define GUC_VIDEO_ENGINE2 4 #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) -/* - * XXX: Beware that Gen9 firmware 32.x uses wrong definition for - * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now - * as we are not enabling GuC submission mode where this will be used - */ #define GUC_MAX_ENGINE_CLASSES 5 -#define GUC_MAX_INSTANCES_PER_CLASS 4 +#define GUC_MAX_INSTANCES_PER_CLASS 16 #define GUC_DOORBELL_INVALID 256 @@ -122,76 +121,6 @@ #define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ -/** - * DOC: GuC Firmware Layout - * - * The GuC firmware layout looks like this: - * - * +-------------------------------+ - * | uc_css_header | - * | | - * | contains major/minor version | - * +-------------------------------+ - * | uCode | - * +-------------------------------+ - * | RSA signature | - * +-------------------------------+ - * | modulus key | - * +-------------------------------+ - * | exponent val | - * +-------------------------------+ - * - * The firmware may or may not have modulus key and exponent data. The header, - * uCode and RSA signature are must-have components that will be used by driver. - * Length of each components, which is all in dwords, can be found in header. - * In the case that modulus and exponent are not present in fw, a.k.a truncated - * image, the length value still appears in header. - * - * Driver will do some basic fw size validation based on the following rules: - * - * 1. Header, uCode and RSA are must-have components. - * 2. All firmware components, if they present, are in the sequence illustrated - * in the layout table above. - * 3. Length info of each component can be found in header, in dwords. - * 4. Modulus and exponent key are not required by driver. They may not appear - * in fw. So driver will load a truncated firmware in this case. - * - * HuC firmware layout is same as GuC firmware. - * Only HuC version information is saved in a different way. - */ - -struct uc_css_header { - u32 module_type; - /* header_size includes all non-uCode bits, including css_header, rsa - * key, modulus key and exponent data. 
*/ - u32 header_size_dw; - u32 header_version; - u32 module_id; - u32 module_vendor; - u32 date; -#define CSS_DATE_DAY (0xFF << 0) -#define CSS_DATE_MONTH (0xFF << 8) -#define CSS_DATE_YEAR (0xFFFF << 16) - u32 size_dw; /* uCode plus header_size_dw */ - u32 key_size_dw; - u32 modulus_size_dw; - u32 exponent_size_dw; - u32 time; -#define CSS_TIME_HOUR (0xFF << 0) -#define CSS_DATE_MIN (0xFF << 8) -#define CSS_DATE_SEC (0xFFFF << 16) - char username[8]; - char buildnumber[12]; - u32 sw_version; -#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) -#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) -#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) -#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) -#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) - u32 reserved[14]; - u32 header_info; -} __packed; - /* Work item for submitting workloads into work queue of GuC. */ struct guc_wq_item { u32 header; diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index e3b83ecb90b5..3460deca12c8 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -24,6 +24,7 @@ #include <linux/debugfs.h> +#include "gt/intel_gt.h" #include "intel_guc_log.h" #include "i915_drv.h" @@ -209,7 +210,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log, log->stats[type].sampled_overflow += 16; } - dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev, + dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev, "GuC log buffer overflow\n"); } @@ -383,7 +384,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log) static int guc_log_relay_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; struct rchan *guc_log_relay_chan; size_t n_subbufs, subbuf_size; int ret; @@ -429,7 +430,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log) static void guc_log_capture_logs(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; guc_read_update_log_buffer(log); @@ -442,6 +443,29 @@ static void guc_log_capture_logs(struct intel_guc_log *log) guc_action_flush_log_complete(guc); } +static u32 __get_default_log_level(struct intel_guc_log *log) +{ + /* A negative value means "use platform/config default" */ + if (i915_modparams.guc_log_level < 0) { + return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? + GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE; + } + + if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { + DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + "guc_log_level", i915_modparams.guc_log_level, + "verbosity too high"); + return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED; + } + + GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED); + GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX); + return i915_modparams.guc_log_level; +} + int intel_guc_log_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); @@ -481,7 +505,11 @@ int intel_guc_log_create(struct intel_guc_log *log) log->vma = vma; - log->level = i915_modparams.guc_log_level; + log->level = __get_default_log_level(log); + DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", + log->level, enableddisabled(log->level), + yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), + GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); return 0; @@ -498,7 +526,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log) int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; int ret = 0; @@ -578,7 +606,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) * the flush notification. This means that we need to unconditionally * flush on relay enabling, since GuC only notifies us once. */ - queue_work(log->relay.flush_wq, &log->relay.flush_work); + queue_work(system_highpri_wq, &log->relay.flush_work); return 0; @@ -593,7 +621,7 @@ out_unlock: void intel_guc_log_relay_flush(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; /* @@ -612,10 +640,10 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) void intel_guc_log_relay_close(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; guc_log_disable_flush_events(log); - synchronize_irq(i915->drm.irq); + intel_synchronize_irq(i915); flush_work(&log->relay.flush_work); @@ -628,5 +656,5 @@ void intel_guc_log_relay_close(struct intel_guc_log *log) void intel_guc_log_handle_flush_event(struct intel_guc_log *log) { - queue_work(log->relay.flush_wq, &log->relay.flush_work); + queue_work(system_highpri_wq, &log->relay.flush_work); } diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h index 7bc763f10c03..1969572f1f79 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -66,7 +66,6 @@ struct intel_guc_log { struct i915_vma *vma; struct { void *buf_addr; - struct workqueue_struct *flush_wq; struct work_struct flush_work; struct rchan *channel; struct mutex lock; diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h index a214f8b71929..e3cbb23299ce 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -24,6 +24,11 @@ #ifndef _INTEL_GUC_REG_H_ #define _INTEL_GUC_REG_H_ +#include <linux/compiler.h> +#include <linux/types.h> + +#include "i915_reg.h" + /* Definitions of GuC H/W registers, bits, etc */ #define GUC_STATUS _MMIO(0xc000) @@ -37,6 +42,7 @@ #define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT) #define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT) #define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT) #define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) #define GS_MIA_SHIFT 16 
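
The new GS_UKERNEL_EXCEPTION status value above pairs with the check added in guc_wait_ucode() earlier in this series: after the DMA transfer the driver polls GUC_STATUS, treats the exception code as a fatal load error, and dumps the microcontroller EIP from SOFT_SCRATCH(13). A condensed sketch of that decode; the helper name is illustrative only, the real logic lives in guc_wait_ucode():

	static bool example_guc_load_failed(struct intel_uncore *uncore)
	{
		u32 status = intel_uncore_read(uncore, GUC_STATUS);

		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED)
			return true;	/* firmware signature rejected */

		if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
			/* GuC took an exception; EIP is parked in SOFT_SCRATCH(13) */
			DRM_ERROR("GuC firmware exception. EIP: %#x\n",
				  intel_uncore_read(uncore, SOFT_SCRATCH(13)));
			return true;
		}

		return false;
	}
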
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) @@ -135,21 +141,21 @@ struct guc_doorbell_info { #define GUC_PM_P24C_IER _MMIO(0xC55C) /* GuC Interrupt Vector */ -#define GEN11_GUC_INTR_GUC2HOST (1 << 15) -#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14) -#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13) -#define GEN11_GUC_INTR_SEM_SIG (1 << 12) -#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11) -#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10) -#define GEN11_GUC_INTR_DMA_DONE (1 << 9) -#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8) -#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7) -#define GEN11_GUC_INTR_SW_INT_6 (1 << 6) -#define GEN11_GUC_INTR_SW_INT_5 (1 << 5) -#define GEN11_GUC_INTR_SW_INT_4 (1 << 4) -#define GEN11_GUC_INTR_SW_INT_3 (1 << 3) -#define GEN11_GUC_INTR_SW_INT_2 (1 << 2) -#define GEN11_GUC_INTR_SW_INT_1 (1 << 1) -#define GEN11_GUC_INTR_SW_INT_0 (1 << 0) +#define GUC_INTR_GUC2HOST BIT(15) +#define GUC_INTR_EXEC_ERROR BIT(14) +#define GUC_INTR_DISPLAY_EVENT BIT(13) +#define GUC_INTR_SEM_SIG BIT(12) +#define GUC_INTR_IOMMU2GUC BIT(11) +#define GUC_INTR_DOORBELL_RANG BIT(10) +#define GUC_INTR_DMA_DONE BIT(9) +#define GUC_INTR_FATAL_ERROR BIT(8) +#define GUC_INTR_NOTIF_ERROR BIT(7) +#define GUC_INTR_SW_INT_6 BIT(6) +#define GUC_INTR_SW_INT_5 BIT(5) +#define GUC_INTR_SW_INT_4 BIT(4) +#define GUC_INTR_SW_INT_3 BIT(3) +#define GUC_INTR_SW_INT_2 BIT(2) +#define GUC_INTR_SW_INT_1 BIT(1) +#define GUC_INTR_SW_INT_0 BIT(0) #endif diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index db531ebc7704..b4238fe16a03 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -24,15 +24,21 @@ #include <linux/circ_buf.h> -#include "gt/intel_engine_pm.h" -#include "gt/intel_lrc_reg.h" -#include "gt/intel_context.h" #include "gem/i915_gem_context.h" +#include "gt/intel_context.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" +#include "gt/intel_lrc_reg.h" #include "intel_guc_submission.h" + #include "i915_drv.h" -#define GUC_PREEMPT_FINISHED 0x1 +enum { + GUC_PREEMPT_NONE = 0, + GUC_PREEMPT_INPROGRESS, + GUC_PREEMPT_FINISHED, +}; #define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 #define GUC_PREEMPT_BREADCRUMB_BYTES \ (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS) @@ -42,11 +48,10 @@ * * GuC client: * A intel_guc_client refers to a submission path through GuC. Currently, there - * are two clients. One of them (the execbuf_client) is charged with all - * submissions to the GuC, the other one (preempt_client) is responsible for - * preempting the execbuf_client. This struct is the owner of a doorbell, a - * process descriptor and a workqueue (all of them inside a single gem object - * that contains all required pages for these elements). + * is only one client, which is charged with all submissions to the GuC. This + * struct is the owner of a doorbell, a process descriptor and a workqueue (all + * of them inside a single gem object that contains all required pages for these + * elements). 
* * GuC stage descriptor: * During initialization, the driver allocates a static pool of 1024 such @@ -84,12 +89,6 @@ * */ -static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) -{ - return (i915_ggtt_offset(engine->status_page.vma) + - I915_GEM_HWS_PREEMPT_ADDR); -} - static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -203,10 +202,10 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); - return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; + return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; } static void __init_doorbell(struct intel_guc_client *client) @@ -366,10 +365,7 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc) static void guc_stage_desc_init(struct intel_guc_client *client) { struct intel_guc *guc = client->guc; - struct i915_gem_context *ctx = client->owner; - struct i915_gem_engines_iter it; struct guc_stage_desc *desc; - struct intel_context *ce; u32 gfx_addr; desc = __get_stage_desc(client); @@ -383,55 +379,6 @@ static void guc_stage_desc_init(struct intel_guc_client *client) desc->priority = client->priority; desc->db_id = client->doorbell_id; - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - struct guc_execlist_context *lrc; - - if (!(ce->engine->mask & client->engines)) - continue; - - /* TODO: We have a design issue to be solved here. Only when we - * receive the first batch, we know which engine is used by the - * user. But here GuC expects the lrc and ring to be pinned. It - * is not an issue for default context, which is the only one - * for now who owns a GuC client. But for future owner of GuC - * client, need to make sure lrc is pinned prior to enter here. - */ - if (!ce->state) - break; /* XXX: continue? */ - - /* - * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy - * submission or, in other words, not using a direct submission - * model) the KMD's LRCA is not used for any work submission. - * Instead, the GuC uses the LRCA of the user mode context (see - * guc_add_request below). - */ - lrc = &desc->lrc[ce->engine->guc_id]; - lrc->context_desc = lower_32_bits(ce->lrc_desc); - - /* The state page is after PPHWSP */ - lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) + - LRC_STATE_PN * PAGE_SIZE; - - /* XXX: In direct submission, the GuC wants the HW context id - * here. 
In proxy submission, it wants the stage id - */ - lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) | - (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET); - - lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma); - lrc->ring_end = lrc->ring_begin + ce->ring->size - 1; - lrc->ring_next_free_location = lrc->ring_begin; - lrc->ring_current_tail_pointer_value = 0; - - desc->engines_used |= BIT(ce->engine->guc_id); - } - i915_gem_context_unlock_engines(ctx); - - DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n", - client->engines, desc->engines_used); - WARN_ON(desc->engines_used == 0); - /* * The doorbell, process descriptor, and workqueue are all parts * of the client object, which the GuC will reference via the GGTT @@ -537,15 +484,11 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); - spin_lock(&client->wq_lock); - guc_wq_item_append(client, engine->guc_id, ctx_desc, ring_tail, rq->fence.seqno); guc_ring_doorbell(client); client->submissions[engine->id] += 1; - - spin_unlock(&client->wq_lock); } /* @@ -563,207 +506,72 @@ static void flush_ggtt_writes(struct i915_vma *vma) intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS); } -static void inject_preempt_context(struct work_struct *work) +static void guc_submit(struct intel_engine_cs *engine, + struct i915_request **out, + struct i915_request **end) { - struct guc_preempt_work *preempt_work = - container_of(work, typeof(*preempt_work), work); - struct intel_engine_cs *engine = preempt_work->engine; - struct intel_guc *guc = container_of(preempt_work, typeof(*guc), - preempt_work[engine->id]); - struct intel_guc_client *client = guc->preempt_client; - struct guc_stage_desc *stage_desc = __get_stage_desc(client); - struct intel_context *ce = engine->preempt_context; - u32 data[7]; - - if (!ce->ring->emit) { /* recreate upon load/resume */ - u32 addr = intel_hws_preempt_done_address(engine); - u32 *cs; - - cs = ce->ring->vaddr; - if (engine->class == RENDER_CLASS) { - cs = gen8_emit_ggtt_write_rcs(cs, - GUC_PREEMPT_FINISHED, - addr, - PIPE_CONTROL_CS_STALL); - } else { - cs = gen8_emit_ggtt_write(cs, - GUC_PREEMPT_FINISHED, - addr, - 0); - *cs++ = MI_NOOP; - *cs++ = MI_NOOP; - } - *cs++ = MI_USER_INTERRUPT; - *cs++ = MI_NOOP; - - ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES; - GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit); + struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc_client *client = guc->execbuf_client; - flush_ggtt_writes(ce->ring->vma); - } + spin_lock(&client->wq_lock); - spin_lock_irq(&client->wq_lock); - guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc), - GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0); - spin_unlock_irq(&client->wq_lock); + do { + struct i915_request *rq = *out++; - /* - * If GuC firmware performs an engine reset while that engine had - * a preemption pending, it will set the terminated attribute bit - * on our preemption stage descriptor. GuC firmware retains all - * pending work items for a high-priority GuC client, unlike the - * normal-priority GuC client where work items are dropped. It - * wants to make sure the preempt-to-idle work doesn't run when - * scheduling resumes, and uses this bit to inform its scheduler - * and presumably us as well. Our job is to clear it for the next - * preemption after reset, otherwise that and future preemptions - * will never complete. 
We'll just clear it every time. - */ - stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED; - - data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION; - data[1] = client->stage_id; - data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q | - INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q; - data[3] = engine->guc_id; - data[4] = guc->execbuf_client->priority; - data[5] = guc->execbuf_client->stage_id; - data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); - - if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) { - execlists_clear_active(&engine->execlists, - EXECLISTS_ACTIVE_PREEMPT); - tasklet_schedule(&engine->execlists.tasklet); - } + flush_ggtt_writes(rq->ring->vma); + guc_add_request(guc, rq); + } while (out != end); - (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++); + spin_unlock(&client->wq_lock); } -/* - * We're using user interrupt and HWSP value to mark that preemption has - * finished and GPU is idle. Normally, we could unwind and continue similar to - * execlists submission path. Unfortunately, with GuC we also need to wait for - * it to finish its own postprocessing, before attempting to submit. Otherwise - * GuC may silently ignore our submissions, and thus we risk losing request at - * best, executing out-of-order and causing kernel panic at worst. - */ -#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10 -static void wait_for_guc_preempt_report(struct intel_engine_cs *engine) +static inline int rq_prio(const struct i915_request *rq) { - struct intel_guc *guc = &engine->i915->guc; - struct guc_shared_ctx_data *data = guc->shared_data_vaddr; - struct guc_ctx_report *report = - &data->preempt_ctx_report[engine->guc_id]; - - if (wait_for_atomic(report->report_return_status == - INTEL_GUC_REPORT_STATUS_COMPLETE, - GUC_PREEMPT_POSTPROCESS_DELAY_MS)) - DRM_ERROR("Timed out waiting for GuC preemption report\n"); - /* - * GuC is expecting that we're also going to clear the affected context - * counter, let's also reset the return status to not depend on GuC - * resetting it after recieving another preempt action - */ - report->affected_count = 0; - report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN; + return rq->sched.attr.priority | __NO_PREEMPTION; } -static void complete_preempt_context(struct intel_engine_cs *engine) +static struct i915_request *schedule_in(struct i915_request *rq, int idx) { - struct intel_engine_execlists *execlists = &engine->execlists; - - GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); - - if (inject_preempt_hang(execlists)) - return; + trace_i915_request_in(rq, idx); - execlists_cancel_port_requests(execlists); - execlists_unwind_incomplete_requests(execlists); + if (!rq->hw_context->inflight) + rq->hw_context->inflight = rq->engine; + intel_context_inflight_inc(rq->hw_context); - wait_for_guc_preempt_report(engine); - intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0); + return i915_request_get(rq); } -/** - * guc_submit() - Submit commands through GuC - * @engine: engine associated with the commands - * - * The only error here arises if the doorbell hardware isn't functioning - * as expected, which really shouln't happen. 
- */ -static void guc_submit(struct intel_engine_cs *engine) +static void schedule_out(struct i915_request *rq) { - struct intel_guc *guc = &engine->i915->guc; - struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - unsigned int n; - - for (n = 0; n < execlists_num_ports(execlists); n++) { - struct i915_request *rq; - unsigned int count; - - rq = port_unpack(&port[n], &count); - if (rq && count == 0) { - port_set(&port[n], port_pack(rq, ++count)); - - flush_ggtt_writes(rq->ring->vma); - - guc_add_request(guc, rq); - } - } -} + trace_i915_request_out(rq); -static void port_assign(struct execlist_port *port, struct i915_request *rq) -{ - GEM_BUG_ON(port_isset(port)); + intel_context_inflight_dec(rq->hw_context); + if (!intel_context_inflight_count(rq->hw_context)) + rq->hw_context->inflight = NULL; - port_set(port, i915_request_get(rq)); + i915_request_put(rq); } -static inline int rq_prio(const struct i915_request *rq) -{ - return rq->sched.attr.priority; -} - -static inline int port_prio(const struct execlist_port *port) -{ - return rq_prio(port_request(port)) | __NO_PREEMPTION; -} - -static bool __guc_dequeue(struct intel_engine_cs *engine) +static void __guc_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - struct i915_request *last = NULL; - const struct execlist_port * const last_port = - &execlists->port[execlists->port_mask]; + struct i915_request **first = execlists->inflight; + struct i915_request ** const last_port = first + execlists->port_mask; + struct i915_request *last = first[0]; + struct i915_request **port; bool submit = false; struct rb_node *rb; lockdep_assert_held(&engine->active.lock); - if (port_isset(port)) { - if (intel_engine_has_preemption(engine)) { - struct guc_preempt_work *preempt_work = - &engine->i915->guc.preempt_work[engine->id]; - int prio = execlists->queue_priority_hint; - - if (i915_scheduler_need_preempt(prio, - port_prio(port))) { - execlists_set_active(execlists, - EXECLISTS_ACTIVE_PREEMPT); - queue_work(engine->i915->guc.preempt_wq, - &preempt_work->work); - return false; - } - } + if (last) { + if (*++first) + return; - port++; - if (port_isset(port)) - return false; + last = NULL; } - GEM_BUG_ON(port_isset(port)); + port = first; while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; @@ -774,18 +582,15 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) if (port == last_port) goto done; - if (submit) - port_assign(port, last); + *port = schedule_in(last, + port - execlists->inflight); port++; } list_del_init(&rq->sched.link); - __i915_request_submit(rq); - trace_i915_request_in(rq, port_index(port, execlists)); - - last = rq; submit = true; + last = rq; } rb_erase_cached(&p->node, &execlists->queue); @@ -794,58 +599,36 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) done: execlists->queue_priority_hint = rb ? 
to_priolist(rb)->priority : INT_MIN; - if (submit) - port_assign(port, last); - if (last) - execlists_user_begin(execlists, execlists->port); - - /* We must always keep the beast fed if we have work piled up */ - GEM_BUG_ON(port_isset(execlists->port) && - !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); - GEM_BUG_ON(rb_first_cached(&execlists->queue) && - !port_isset(execlists->port)); - - return submit; -} - -static void guc_dequeue(struct intel_engine_cs *engine) -{ - if (__guc_dequeue(engine)) - guc_submit(engine); + if (submit) { + *port = schedule_in(last, port - execlists->inflight); + *++port = NULL; + guc_submit(engine, first, port); + } + execlists->active = execlists->inflight; } static void guc_submission_tasklet(unsigned long data) { struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - struct i915_request *rq; + struct i915_request **port, *rq; unsigned long flags; spin_lock_irqsave(&engine->active.lock, flags); - rq = port_request(port); - while (rq && i915_request_completed(rq)) { - trace_i915_request_out(rq); - i915_request_put(rq); - - port = execlists_port_complete(execlists, port); - if (port_isset(port)) { - execlists_user_begin(execlists, port); - rq = port_request(port); - } else { - execlists_user_end(execlists); - rq = NULL; - } - } + for (port = execlists->inflight; (rq = *port); port++) { + if (!i915_request_completed(rq)) + break; - if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) && - intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) == - GUC_PREEMPT_FINISHED) - complete_preempt_context(engine); + schedule_out(rq); + } + if (port != execlists->inflight) { + int idx = port - execlists->inflight; + int rem = ARRAY_SIZE(execlists->inflight) - idx; + memmove(execlists->inflight, port, rem * sizeof(*port)); + } - if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) - guc_dequeue(engine); + __guc_dequeue(engine); spin_unlock_irqrestore(&engine->active.lock, flags); } @@ -866,16 +649,6 @@ static void guc_reset_prepare(struct intel_engine_cs *engine) * prevents the race. */ __tasklet_disable_sync_once(&execlists->tasklet); - - /* - * We're using worker to queue preemption requests from the tasklet in - * GuC submission mode. - * Even though tasklet was disabled, we may still have a worker queued. - * Let's make sure that all workers scheduled before disabling the - * tasklet are completed before continuing with the reset. 
- */ - if (engine->i915->guc.preempt_wq) - flush_workqueue(engine->i915->guc.preempt_wq); } static void guc_reset(struct intel_engine_cs *engine, bool stalled) @@ -896,7 +669,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled) if (!i915_request_started(rq)) stalled = false; - i915_reset_request(rq, stalled); + __i915_request_reset(rq, stalled); intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); out_unlock: @@ -959,7 +732,6 @@ static void guc_cancel_requests(struct intel_engine_cs *engine) execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; - GEM_BUG_ON(port_isset(execlists->port)); spin_unlock_irqrestore(&engine->active.lock, flags); } @@ -1014,25 +786,18 @@ static bool guc_verify_doorbells(struct intel_guc *guc) /** * guc_client_alloc() - Allocate an intel_guc_client - * @dev_priv: driver private data structure - * @engines: The set of engines to enable for this client + * @guc: the intel_guc structure * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW * The kernel client to replace ExecList submission is created with * NORMAL priority. Priority of a client for scheduler can be HIGH, * while a preemption context can use CRITICAL. - * @ctx: the context that owns the client (we use the default render - * context) * * Return: An intel_guc_client object if success, else NULL. */ static struct intel_guc_client * -guc_client_alloc(struct drm_i915_private *dev_priv, - u32 engines, - u32 priority, - struct i915_gem_context *ctx) +guc_client_alloc(struct intel_guc *guc, u32 priority) { struct intel_guc_client *client; - struct intel_guc *guc = &dev_priv->guc; struct i915_vma *vma; void *vaddr; int ret; @@ -1042,8 +807,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv, return ERR_PTR(-ENOMEM); client->guc = guc; - client->owner = ctx; - client->engines = engines; client->priority = priority; client->doorbell_id = GUC_DOORBELL_INVALID; spin_lock_init(&client->wq_lock); @@ -1088,8 +851,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv, else client->proc_desc_offset = (GUC_DB_SIZE / 2); - DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", - priority, client, client->engines, client->stage_id); + DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n", + priority, client, client->stage_id); DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", client->doorbell_id, client->doorbell_offset); @@ -1129,36 +892,17 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce) static int guc_clients_create(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_guc_client *client; GEM_BUG_ON(guc->execbuf_client); - GEM_BUG_ON(guc->preempt_client); - client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->engine_mask, - GUC_CLIENT_PRIORITY_KMD_NORMAL, - dev_priv->kernel_context); + client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL); if (IS_ERR(client)) { DRM_ERROR("Failed to create GuC client for submission!\n"); return PTR_ERR(client); } guc->execbuf_client = client; - if (dev_priv->preempt_context) { - client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->engine_mask, - GUC_CLIENT_PRIORITY_KMD_HIGH, - dev_priv->preempt_context); - if (IS_ERR(client)) { - DRM_ERROR("Failed to create GuC client for preemption!\n"); - guc_client_free(guc->execbuf_client); - guc->execbuf_client = NULL; - return PTR_ERR(client); - } - guc->preempt_client = client; - } - return 0; } @@ -1166,10 +910,6 @@ static void 
guc_clients_destroy(struct intel_guc *guc) { struct intel_guc_client *client; - client = fetch_and_zero(&guc->preempt_client); - if (client) - guc_client_free(client); - client = fetch_and_zero(&guc->execbuf_client); if (client) guc_client_free(client); @@ -1201,7 +941,7 @@ static void __guc_client_disable(struct intel_guc_client *client) * the case, instead of trying (in vain) to communicate with it, let's * just cleanup the doorbell HW and our internal state. */ - if (intel_guc_is_loaded(client->guc)) + if (intel_guc_is_running(client->guc)) destroy_doorbell(client); else __fini_doorbell(client); @@ -1212,28 +952,11 @@ static void __guc_client_disable(struct intel_guc_client *client) static int guc_clients_enable(struct intel_guc *guc) { - int ret; - - ret = __guc_client_enable(guc->execbuf_client); - if (ret) - return ret; - - if (guc->preempt_client) { - ret = __guc_client_enable(guc->preempt_client); - if (ret) { - __guc_client_disable(guc->execbuf_client); - return ret; - } - } - - return 0; + return __guc_client_enable(guc->execbuf_client); } static void guc_clients_disable(struct intel_guc *guc) { - if (guc->preempt_client) - __guc_client_disable(guc->preempt_client); - if (guc->execbuf_client) __guc_client_disable(guc->execbuf_client); } @@ -1244,9 +967,6 @@ static void guc_clients_disable(struct intel_guc *guc) */ int intel_guc_submission_init(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_engine_cs *engine; - enum intel_engine_id id; int ret; if (guc->stage_desc_pool) @@ -1266,11 +986,6 @@ int intel_guc_submission_init(struct intel_guc *guc) if (ret) goto err_pool; - for_each_engine(engine, dev_priv, id) { - guc->preempt_work[id].engine = engine; - INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context); - } - return 0; err_pool: @@ -1280,13 +995,6 @@ err_pool: void intel_guc_submission_fini(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, dev_priv, id) - cancel_work_sync(&guc->preempt_work[id].work); - guc_clients_destroy(guc); WARN_ON(!guc_verify_doorbells(guc)); @@ -1294,9 +1002,10 @@ void intel_guc_submission_fini(struct intel_guc *guc) guc_stage_desc_pool_destroy(guc); } -static void guc_interrupts_capture(struct drm_i915_private *dev_priv) +static void guc_interrupts_capture(struct intel_gt *gt) { - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = >->i915->gt_pm.rps; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; int irqs; @@ -1305,16 +1014,16 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv) * to GuC */ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, id) + for_each_engine(engine, gt->i915, id) ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route USER_INTERRUPT to Host, all others are sent to GuC. 
*/ irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; /* These three registers have the same bit definitions */ - I915_WRITE(GUC_BCS_RCS_IER, ~irqs); - I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); - I915_WRITE(GUC_WD_VECS_IER, ~irqs); + intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs); + intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs); + intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs); /* * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all @@ -1339,9 +1048,10 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv) rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; } -static void guc_interrupts_release(struct drm_i915_private *dev_priv) +static void guc_interrupts_release(struct intel_gt *gt) { - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = >->i915->gt_pm.rps; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; int irqs; @@ -1352,13 +1062,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv) */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, id) + for_each_engine(engine, gt->i915, id) ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route all GT interrupts to the host */ - I915_WRITE(GUC_BCS_RCS_IER, 0); - I915_WRITE(GUC_VCS2_VCS1_IER, 0); - I915_WRITE(GUC_WD_VECS_IER, 0); + intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0); + intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0); + intel_uncore_write(uncore, GUC_WD_VECS_IER, 0); rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; @@ -1408,7 +1118,7 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) int intel_guc_submission_enable(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); struct intel_engine_cs *engine; enum intel_engine_id id; int err; @@ -1422,7 +1132,7 @@ int intel_guc_submission_enable(struct intel_guc *guc) * and it is guaranteed that it will remove the work item from the * queue before our request is completed. 
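
Most hunks in this series repeat the same mechanical conversion seen in guc_interrupts_capture()/guc_interrupts_release() above: the implicit dev_priv used by the I915_WRITE()/I915_READ() macros is replaced by an explicit intel_uncore obtained through the new guc_to_gt() container. A side-by-side sketch of the idiom, using forms that appear verbatim in the surrounding hunks:

	/* old style: dev_priv is implicit in the register macro */
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	I915_WRITE(GUC_WD_VECS_IER, 0);

	/* new style: the uncore is passed explicitly via intel_gt */
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);
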
*/ - BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) * + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) * sizeof(struct guc_wq_item) * I915_NUM_ENGINES > GUC_WQ_SIZE); @@ -1433,9 +1143,9 @@ int intel_guc_submission_enable(struct intel_guc *guc) return err; /* Take over from manual control of ELSP (execlists) */ - guc_interrupts_capture(dev_priv); + guc_interrupts_capture(gt); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, gt->i915, id) { engine->set_default_submission = guc_set_default_submission; engine->set_default_submission(engine); } @@ -1445,14 +1155,14 @@ int intel_guc_submission_enable(struct intel_guc *guc) void intel_guc_submission_disable(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ + GEM_BUG_ON(gt->awake); /* GT should be parked first */ - guc_interrupts_release(dev_priv); + guc_interrupts_release(gt); guc_clients_disable(guc); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/intel_guc.c" +#include "selftest_guc.c" #endif diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index 7d823a513b9c..87a38cb6faf3 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -58,11 +58,9 @@ struct drm_i915_private; struct intel_guc_client { struct i915_vma *vma; void *vaddr; - struct i915_gem_context *owner; struct intel_guc *guc; /* bitmap of (host) engine ids */ - u32 engines; u32 priority; u32 stage_id; u32 proc_desc_offset; diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index fb6f693d3cac..c9535caba844 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -24,12 +24,13 @@ #include <linux/types.h> +#include "gt/intel_gt.h" #include "intel_huc.h" #include "i915_drv.h" void intel_huc_init_early(struct intel_huc *huc) { - struct drm_i915_private *i915 = huc_to_i915(huc); + struct drm_i915_private *i915 = huc_to_gt(huc)->i915; intel_huc_fw_init_early(huc); @@ -44,19 +45,12 @@ void intel_huc_init_early(struct intel_huc *huc) } } -int intel_huc_init_misc(struct intel_huc *huc) -{ - struct drm_i915_private *i915 = huc_to_i915(huc); - - intel_uc_fw_fetch(i915, &huc->fw); - return 0; -} - static int intel_huc_rsa_data_create(struct intel_huc *huc) { - struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->guc; + struct intel_gt *gt = huc_to_gt(huc); + struct intel_guc *guc = >->uc.guc; struct i915_vma *vma; + size_t copied; void *vaddr; /* @@ -69,6 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) * the authentication since its GGTT offset will be GuC * accessible. 
*/ + GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE); vma = intel_guc_allocate_vma(guc, PAGE_SIZE); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -79,26 +74,43 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) return PTR_ERR(vaddr); } + copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size); + GEM_BUG_ON(copied < huc->fw.rsa_size); + + i915_gem_object_unpin_map(vma->obj); + huc->rsa_data = vma; - huc->rsa_data_vaddr = vaddr; return 0; } static void intel_huc_rsa_data_destroy(struct intel_huc *huc) { - i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP); + i915_vma_unpin_and_release(&huc->rsa_data, 0); } int intel_huc_init(struct intel_huc *huc) { int err; - err = intel_huc_rsa_data_create(huc); + err = intel_uc_fw_init(&huc->fw); if (err) return err; - return intel_uc_fw_init(&huc->fw); + /* + * HuC firmware image is outside GuC accessible range. + * Copy the RSA signature out of the image into + * a perma-pinned region set aside for it + */ + err = intel_huc_rsa_data_create(huc); + if (err) + goto out_fini; + + return 0; + +out_fini: + intel_uc_fw_fini(&huc->fw); + return err; } void intel_huc_fini(struct intel_huc *huc) @@ -120,12 +132,12 @@ void intel_huc_fini(struct intel_huc *huc) */ int intel_huc_auth(struct intel_huc *huc) { - struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->guc; + struct intel_gt *gt = huc_to_gt(huc); + struct intel_guc *guc = >->uc.guc; int ret; - if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; + GEM_BUG_ON(!intel_uc_fw_is_loaded(&huc->fw)); + GEM_BUG_ON(intel_huc_is_authenticated(huc)); ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->rsa_data)); @@ -135,7 +147,7 @@ int intel_huc_auth(struct intel_huc *huc) } /* Check authentication status, it should be done by now */ - ret = __intel_wait_for_register(&i915->uncore, + ret = __intel_wait_for_register(gt->uncore, huc->status.reg, huc->status.mask, huc->status.value, @@ -145,10 +157,12 @@ int intel_huc_auth(struct intel_huc *huc) goto fail; } + huc->fw.status = INTEL_UC_FIRMWARE_RUNNING; + return 0; fail: - huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL; + huc->fw.status = INTEL_UC_FIRMWARE_FAIL; DRM_ERROR("HuC: Authentication failed %d\n", ret); return ret; @@ -167,16 +181,15 @@ fail: */ int intel_huc_check_status(struct intel_huc *huc) { - struct drm_i915_private *dev_priv = huc_to_i915(huc); + struct intel_gt *gt = huc_to_gt(huc); intel_wakeref_t wakeref; - bool status = false; + u32 status = 0; - if (!HAS_HUC(dev_priv)) + if (!intel_uc_is_using_huc(>->uc)) return -ENODEV; - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - status = (I915_READ(huc->status.reg) & huc->status.mask) == - huc->status.value; + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + status = intel_uncore_read(gt->uncore, huc->status.reg); - return status; + return (status & huc->status.mask) == huc->status.value; } diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index 2a6c94e79f17..4465209ce233 100644 --- a/drivers/gpu/drm/i915/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -35,7 +35,6 @@ struct intel_huc { /* HuC-specific additions */ struct i915_vma *rsa_data; - void *rsa_data_vaddr; struct { i915_reg_t reg; @@ -45,21 +44,20 @@ struct intel_huc { }; void intel_huc_init_early(struct intel_huc *huc); -int intel_huc_init_misc(struct intel_huc *huc); int intel_huc_init(struct intel_huc *huc); void intel_huc_fini(struct intel_huc *huc); int intel_huc_auth(struct intel_huc *huc); int 
intel_huc_check_status(struct intel_huc *huc); -static inline void intel_huc_fini_misc(struct intel_huc *huc) -{ - intel_uc_fw_cleanup_fetch(&huc->fw); -} - static inline int intel_huc_sanitize(struct intel_huc *huc) { intel_uc_fw_sanitize(&huc->fw); return 0; } +static inline bool intel_huc_is_authenticated(struct intel_huc *huc) +{ + return intel_uc_fw_is_running(&huc->fw); +} + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c new file mode 100644 index 000000000000..0e885859c828 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#include "gt/intel_gt.h" +#include "intel_huc_fw.h" +#include "i915_drv.h" + +/** + * DOC: HuC Firmware + * + * Motivation: + * GEN9 introduces a new dedicated firmware for usage in media HEVC (High + * Efficiency Video Coding) operations. Userspace can use the firmware + * capabilities by adding HuC specific commands to batch buffers. + * + * Implementation: + * The same firmware loader is used as the GuC. However, the actual + * loading to HW is deferred until GEM initialization is done. + * + * Note that HuC firmware loading must be done before GuC loading. + */ + +/** + * intel_huc_fw_init_early() - initializes HuC firmware struct + * @huc: intel_huc struct + * + * On platforms with HuC selects firmware for uploading + */ +void intel_huc_fw_init_early(struct intel_huc *huc) +{ + intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915); +} + +/** + * intel_huc_fw_upload() - load HuC uCode to device + * @huc: intel_huc structure + * + * Called from intel_uc_init_hw() during driver load, resume from sleep and + * after a GPU reset. Note that HuC must be loaded before GuC. + * + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. + * + * Return: non-zero code on error + */ +int intel_huc_fw_upload(struct intel_huc *huc) +{ + /* HW doesn't look at destination address for HuC, so set it to 0 */ + return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL); +} diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h index 8a00a0ebddc5..8a00a0ebddc5 100644 --- a/drivers/gpu/drm/i915/intel_huc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c new file mode 100644 index 000000000000..6eb8bb3fa252 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -0,0 +1,570 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "gt/intel_gt.h" +#include "gt/intel_reset.h" +#include "intel_guc.h" +#include "intel_guc_ads.h" +#include "intel_guc_submission.h" +#include "intel_uc.h" + +#include "i915_drv.h" + +static void guc_free_load_err_log(struct intel_guc *guc); + +/* Reset GuC providing us with fresh state for both GuC and HuC. + */ +static int __intel_uc_reset_hw(struct intel_uc *uc) +{ + struct intel_gt *gt = uc_to_gt(uc); + int ret; + u32 guc_status; + + ret = intel_reset_guc(gt); + if (ret) { + DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); + return ret; + } + + guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); + WARN(!(guc_status & GS_MIA_IN_RESET), + "GuC status: 0x%x, MIA core expected to be in reset\n", + guc_status); + + return ret; +} + +static int __get_platform_enable_guc(struct intel_uc *uc) +{ + struct intel_uc_fw *guc_fw = &uc->guc.fw; + struct intel_uc_fw *huc_fw = &uc->huc.fw; + int enable_guc = 0; + + if (!HAS_GT_UC(uc_to_gt(uc)->i915)) + return 0; + + /* We don't want to enable GuC/HuC on pre-Gen11 by default */ + if (INTEL_GEN(uc_to_gt(uc)->i915) < 11) + return 0; + + if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw)) + enable_guc |= ENABLE_GUC_LOAD_HUC; + + return enable_guc; +} + +/** + * sanitize_options_early - sanitize uC related modparam options + * @uc: the intel_uc structure + * + * In case of "enable_guc" option this function will attempt to modify + * it only if it was initially set to "auto(-1)". Default value for this + * modparam varies between platforms and it is hardcoded in driver code. + * Any other modparam value is only monitored against availability of the + * related hardware or firmware definitions. 
+ */ +static void sanitize_options_early(struct intel_uc *uc) +{ + struct intel_uc_fw *guc_fw = &uc->guc.fw; + struct intel_uc_fw *huc_fw = &uc->huc.fw; + + /* A negative value means "use platform default" */ + if (i915_modparams.enable_guc < 0) + i915_modparams.enable_guc = __get_platform_enable_guc(uc); + + DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", + i915_modparams.enable_guc, + yesno(intel_uc_is_using_guc_submission(uc)), + yesno(intel_uc_is_using_huc(uc))); + + /* Verify GuC firmware availability */ + if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) { + DRM_WARN("Incompatible option detected: enable_guc=%d, " + "but GuC is not supported!\n", + i915_modparams.enable_guc); + DRM_INFO("Disabling GuC/HuC loading!\n"); + i915_modparams.enable_guc = 0; + } + + /* Verify HuC firmware availability */ + if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) { + DRM_WARN("Incompatible option detected: enable_guc=%d, " + "but HuC is not supported!\n", + i915_modparams.enable_guc); + DRM_INFO("Disabling HuC loading!\n"); + i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC; + } + + /* XXX: GuC submission is unavailable for now */ + if (intel_uc_is_using_guc_submission(uc)) { + DRM_INFO("Incompatible option detected: enable_guc=%d, " + "but GuC submission is not supported!\n", + i915_modparams.enable_guc); + DRM_INFO("Switching to non-GuC submission mode!\n"); + i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; + } + + /* Make sure that sanitization was done */ + GEM_BUG_ON(i915_modparams.enable_guc < 0); +} + +void intel_uc_init_early(struct intel_uc *uc) +{ + intel_guc_init_early(&uc->guc); + intel_huc_init_early(&uc->huc); + + sanitize_options_early(uc); +} + +void intel_uc_cleanup_early(struct intel_uc *uc) +{ + guc_free_load_err_log(&uc->guc); +} + +/** + * intel_uc_init_mmio - setup uC MMIO access + * @uc: the intel_uc structure + * + * Setup minimal state necessary for MMIO accesses later in the + * initialization sequence. + */ +void intel_uc_init_mmio(struct intel_uc *uc) +{ + intel_guc_init_send_regs(&uc->guc); +} + +static void guc_capture_load_err_log(struct intel_guc *guc) +{ + if (!guc->log.vma || !intel_guc_log_get_level(&guc->log)) + return; + + if (!guc->load_err_log) + guc->load_err_log = i915_gem_object_get(guc->log.vma->obj); + + return; +} + +static void guc_free_load_err_log(struct intel_guc *guc) +{ + if (guc->load_err_log) + i915_gem_object_put(guc->load_err_log); +} + +/* + * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 + * register using the same bits used in the CT message payload. Since our + * communication channel with guc is turned off at this point, we can save the + * message and handle it after we turn it back on. + */ +static void guc_clear_mmio_msg(struct intel_guc *guc) +{ + intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0); +} + +static void guc_get_mmio_msg(struct intel_guc *guc) +{ + u32 val; + + spin_lock_irq(&guc->irq_lock); + + val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15)); + guc->mmio_msg |= val & guc->msg_enabled_mask; + + /* + * clear all events, including the ones we're not currently servicing, + * to make sure we don't try to process a stale message if we enable + * handling of more events later. 
+ */ + guc_clear_mmio_msg(guc); + + spin_unlock_irq(&guc->irq_lock); +} + +static void guc_handle_mmio_msg(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + + /* we need communication to be enabled to reply to GuC */ + GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop); + + if (!guc->mmio_msg) + return; + + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); + spin_unlock_irq(&i915->irq_lock); + + guc->mmio_msg = 0; +} + +static void guc_reset_interrupts(struct intel_guc *guc) +{ + guc->interrupts.reset(guc); +} + +static void guc_enable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.enable(guc); +} + +static void guc_disable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.disable(guc); +} + +static int guc_enable_communication(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + int ret; + + ret = intel_guc_ct_enable(&guc->ct); + if (ret) + return ret; + + guc->send = intel_guc_send_ct; + guc->handler = intel_guc_to_host_event_handler_ct; + + /* check for mmio messages received before/during the CT enable */ + guc_get_mmio_msg(guc); + guc_handle_mmio_msg(guc); + + guc_enable_interrupts(guc); + + /* check for CT messages received before we enabled interrupts */ + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_event_handler_ct(guc); + spin_unlock_irq(&i915->irq_lock); + + DRM_INFO("GuC communication enabled\n"); + + return 0; +} + +static void guc_stop_communication(struct intel_guc *guc) +{ + intel_guc_ct_stop(&guc->ct); + + guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; + + guc_clear_mmio_msg(guc); +} + +static void guc_disable_communication(struct intel_guc *guc) +{ + /* + * Events generated during or after CT disable are logged by guc in + * via mmio. Make sure the register is clear before disabling CT since + * all events we cared about have already been processed via CT. + */ + guc_clear_mmio_msg(guc); + + guc_disable_interrupts(guc); + + guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; + + intel_guc_ct_disable(&guc->ct); + + /* + * Check for messages received during/after the CT disable. We do not + * expect any messages to have arrived via CT between the interrupt + * disable and the CT disable because GuC should've been idle until we + * triggered the CT disable protocol. 
+ */ + guc_get_mmio_msg(guc); + + DRM_INFO("GuC communication disabled\n"); +} + +void intel_uc_fetch_firmwares(struct intel_uc *uc) +{ + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + + if (!intel_uc_is_using_guc(uc)) + return; + + intel_uc_fw_fetch(&uc->guc.fw, i915); + + if (intel_uc_is_using_huc(uc)) + intel_uc_fw_fetch(&uc->huc.fw, i915); +} + +void intel_uc_cleanup_firmwares(struct intel_uc *uc) +{ + if (!intel_uc_is_using_guc(uc)) + return; + + if (intel_uc_is_using_huc(uc)) + intel_uc_fw_cleanup_fetch(&uc->huc.fw); + + intel_uc_fw_cleanup_fetch(&uc->guc.fw); +} + +int intel_uc_init(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + int ret; + + if (!intel_uc_is_using_guc(uc)) + return 0; + + if (!intel_uc_fw_supported(&guc->fw)) + return -ENODEV; + + /* XXX: GuC submission is unavailable for now */ + GEM_BUG_ON(intel_uc_is_using_guc_submission(uc)); + + ret = intel_guc_init(guc); + if (ret) + return ret; + + if (intel_uc_is_using_huc(uc)) { + ret = intel_huc_init(huc); + if (ret) + goto err_guc; + } + + return 0; + +err_guc: + intel_guc_fini(guc); + return ret; +} + +void intel_uc_fini(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + if (!intel_uc_is_using_guc(uc)) + return; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + if (intel_uc_is_using_huc(uc)) + intel_huc_fini(&uc->huc); + + intel_guc_fini(guc); +} + +static void __uc_sanitize(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + intel_huc_sanitize(huc); + intel_guc_sanitize(guc); + + __intel_uc_reset_hw(uc); +} + +void intel_uc_sanitize(struct intel_uc *uc) +{ + if (!intel_uc_is_using_guc(uc)) + return; + + __uc_sanitize(uc); +} + +int intel_uc_init_hw(struct intel_uc *uc) +{ + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + int ret, attempts; + + if (!intel_uc_is_using_guc(uc)) + return 0; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + guc_reset_interrupts(guc); + + /* WaEnableuKernelHeaderValidFix:skl */ + /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ + if (IS_GEN(i915, 9)) + attempts = 3; + else + attempts = 1; + + while (attempts--) { + /* + * Always reset the GuC just before (re)loading, so + * that the state and timing are fairly predictable + */ + ret = __intel_uc_reset_hw(uc); + if (ret) + goto err_out; + + if (intel_uc_is_using_huc(uc)) { + ret = intel_huc_fw_upload(huc); + if (ret && intel_uc_fw_is_overridden(&huc->fw)) + goto err_out; + } + + intel_guc_ads_reset(guc); + intel_guc_write_params(guc); + ret = intel_guc_fw_upload(guc); + if (ret == 0) + break; + + DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " + "retry %d more time(s)\n", ret, attempts); + } + + /* Did we succeded or run out of retries? 
*/ + if (ret) + goto err_log_capture; + + ret = guc_enable_communication(guc); + if (ret) + goto err_log_capture; + + if (intel_uc_fw_is_loaded(&huc->fw)) { + ret = intel_huc_auth(huc); + if (ret && intel_uc_fw_is_overridden(&huc->fw)) + goto err_communication; + } + + ret = intel_guc_sample_forcewake(guc); + if (ret) + goto err_communication; + + if (intel_uc_is_using_guc_submission(uc)) { + ret = intel_guc_submission_enable(guc); + if (ret) + goto err_communication; + } + + dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", + guc->fw.major_ver_found, guc->fw.minor_ver_found); + dev_info(i915->drm.dev, "GuC submission %s\n", + enableddisabled(intel_uc_is_using_guc_submission(uc))); + dev_info(i915->drm.dev, "HuC %s\n", + enableddisabled(intel_huc_is_authenticated(huc))); + + return 0; + + /* + * We've failed to load the firmware :( + */ +err_communication: + guc_disable_communication(guc); +err_log_capture: + guc_capture_load_err_log(guc); +err_out: + __uc_sanitize(uc); + + /* + * Note that there is no fallback as either user explicitly asked for + * the GuC or driver default option was to run with the GuC enabled. + */ + if (GEM_WARN_ON(ret == -EIO)) + ret = -EINVAL; + + dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret); + return ret; +} + +void intel_uc_fini_hw(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + if (!intel_guc_is_running(guc)) + return; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + if (intel_uc_is_using_guc_submission(uc)) + intel_guc_submission_disable(guc); + + guc_disable_communication(guc); + __uc_sanitize(uc); +} + +/** + * intel_uc_reset_prepare - Prepare for reset + * @uc: the intel_uc structure + * + * Preparing for full gpu reset. + */ +void intel_uc_reset_prepare(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + if (!intel_guc_is_running(guc)) + return; + + guc_stop_communication(guc); + __uc_sanitize(uc); +} + +void intel_uc_runtime_suspend(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + int err; + + if (!intel_guc_is_running(guc)) + return; + + err = intel_guc_suspend(guc); + if (err) + DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); + + guc_disable_communication(guc); +} + +void intel_uc_suspend(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + intel_wakeref_t wakeref; + + if (!intel_guc_is_running(guc)) + return; + + with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) + intel_uc_runtime_suspend(uc); +} + +int intel_uc_resume(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + int err; + + if (!intel_guc_is_running(guc)) + return 0; + + guc_enable_communication(guc); + + err = intel_guc_resume(guc); + if (err) { + DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 3ea06c87dfcd..fe3362fd7706 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -28,34 +28,39 @@ #include "intel_huc.h" #include "i915_params.h" -void intel_uc_init_early(struct drm_i915_private *dev_priv); -void intel_uc_cleanup_early(struct drm_i915_private *dev_priv); -void intel_uc_init_mmio(struct drm_i915_private *dev_priv); -int intel_uc_init_misc(struct drm_i915_private *dev_priv); -void intel_uc_fini_misc(struct drm_i915_private *dev_priv); -void intel_uc_sanitize(struct drm_i915_private *dev_priv); -int intel_uc_init_hw(struct drm_i915_private *dev_priv); -void intel_uc_fini_hw(struct drm_i915_private *dev_priv); 
-int intel_uc_init(struct drm_i915_private *dev_priv); -void intel_uc_fini(struct drm_i915_private *dev_priv); -void intel_uc_reset_prepare(struct drm_i915_private *i915); -void intel_uc_suspend(struct drm_i915_private *i915); -void intel_uc_runtime_suspend(struct drm_i915_private *i915); -int intel_uc_resume(struct drm_i915_private *dev_priv); +struct intel_uc { + struct intel_guc guc; + struct intel_huc huc; +}; -static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915) +void intel_uc_init_early(struct intel_uc *uc); +void intel_uc_cleanup_early(struct intel_uc *uc); +void intel_uc_init_mmio(struct intel_uc *uc); +void intel_uc_fetch_firmwares(struct intel_uc *uc); +void intel_uc_cleanup_firmwares(struct intel_uc *uc); +void intel_uc_sanitize(struct intel_uc *uc); +int intel_uc_init_hw(struct intel_uc *uc); +void intel_uc_fini_hw(struct intel_uc *uc); +int intel_uc_init(struct intel_uc *uc); +void intel_uc_fini(struct intel_uc *uc); +void intel_uc_reset_prepare(struct intel_uc *uc); +void intel_uc_suspend(struct intel_uc *uc); +void intel_uc_runtime_suspend(struct intel_uc *uc); +int intel_uc_resume(struct intel_uc *uc); + +static inline bool intel_uc_is_using_guc(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc > 0; } -static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915) +static inline bool intel_uc_is_using_guc_submission(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; } -static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915) +static inline bool intel_uc_is_using_huc(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c new file mode 100644 index 000000000000..ac91e3efd02b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -0,0 +1,540 @@ +/* + * Copyright © 2016-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/bitfield.h> +#include <linux/firmware.h> +#include <drm/drm_print.h> + +#include "intel_uc_fw.h" +#include "intel_uc_fw_abi.h" +#include "i915_drv.h" + +/* + * List of required GuC and HuC binaries per-platform. + * Must be ordered based on platform + revid, from newer to older. 
+ */ +#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ + fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ + fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) + +#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \ + "i915/" \ + __stringify(prefix_) name_ \ + __stringify(major_) separator_ \ + __stringify(minor_) separator_ \ + __stringify(patch_) ".bin" + +#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \ + __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_) + +#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \ + __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_) + +/* All blobs need to be declared via MODULE_FIRMWARE() */ +#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \ + MODULE_FIRMWARE(guc_); \ + MODULE_FIRMWARE(huc_); + +INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH) + +/* The below structs and macros are used to iterate across the list of blobs */ +struct __packed uc_fw_blob { + u8 major; + u8 minor; + const char *path; +}; + +#define UC_FW_BLOB(major_, minor_, path_) \ + { .major = major_, .minor = minor_, .path = path_ } + +#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \ + UC_FW_BLOB(major_, minor_, \ + MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_)) + +#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \ + UC_FW_BLOB(major_, minor_, \ + MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_)) + +struct __packed uc_fw_platform_requirement { + enum intel_platform p; + u8 rev; /* first platform rev using this FW */ + const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES]; +}; + +#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \ +{ \ + .p = INTEL_##platform_, \ + .rev = revid_, \ + .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \ + .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \ +}, + +static void +__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev) +{ + static const struct uc_fw_platform_requirement fw_blobs[] = { + INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB) + }; + int i; + + for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) { + if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) { + const struct uc_fw_blob *blob = + &fw_blobs[i].blobs[uc_fw->type]; + uc_fw->path = blob->path; + uc_fw->major_ver_wanted = blob->major; + uc_fw->minor_ver_wanted = blob->minor; + break; + } + } + + /* make sure the list is ordered as expected */ + if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) { + for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) { + if (fw_blobs[i].p < fw_blobs[i - 1].p) + continue; + + if (fw_blobs[i].p == fw_blobs[i - 1].p && + fw_blobs[i].rev < fw_blobs[i - 1].rev) + continue; + + pr_err("invalid FW blob order: %s r%u comes before %s r%u\n", + intel_platform_name(fw_blobs[i - 1].p), + fw_blobs[i - 1].rev, + intel_platform_name(fw_blobs[i].p), + fw_blobs[i].rev); + + uc_fw->path = NULL; + } + } +} + +static bool +__uc_fw_override(struct intel_uc_fw *uc_fw) +{ + switch (uc_fw->type) { + case INTEL_UC_FW_TYPE_GUC: + uc_fw->path = i915_modparams.guc_firmware_path; + break; + case INTEL_UC_FW_TYPE_HUC: + uc_fw->path = i915_modparams.huc_firmware_path; + break; + } + + 
uc_fw->user_overridden = uc_fw->path; + return uc_fw->user_overridden; +} + +/** + * intel_uc_fw_init_early - initialize the uC object and select the firmware + * @i915: device private + * @uc_fw: uC firmware + * @type: type of uC + * + * Initialize the state of our uC object and relevant tracking and select the + * firmware to fetch and load. + */ +void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_type type, + struct drm_i915_private *i915) +{ + /* + * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status + * before we've looked at the HW caps to see if we have uc support + */ + BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); + GEM_BUG_ON(uc_fw->status); + GEM_BUG_ON(uc_fw->path); + + uc_fw->type = type; + + if (HAS_GT_UC(i915) && likely(!__uc_fw_override(uc_fw))) + __uc_fw_auto_select(uc_fw, INTEL_INFO(i915)->platform, + INTEL_REVID(i915)); + + if (uc_fw->path && *uc_fw->path) + uc_fw->status = INTEL_UC_FIRMWARE_SELECTED; + else + uc_fw->status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; +} + +/** + * intel_uc_fw_fetch - fetch uC firmware + * + * @uc_fw: uC firmware + * @i915: device private + * + * Fetch uC firmware into GEM obj. + */ +void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) +{ + struct drm_i915_gem_object *obj; + const struct firmware *fw = NULL; + struct uc_css_header *css; + size_t size; + int err; + + GEM_BUG_ON(!intel_uc_fw_supported(uc_fw)); + + err = request_firmware(&fw, uc_fw->path, i915->drm.dev); + if (err) + goto fail; + + DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n", + intel_uc_fw_type_repr(uc_fw->type), fw->size, fw); + + /* Check the size of the blob before examining buffer contents */ + if (fw->size < sizeof(struct uc_css_header)) { + DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n", + intel_uc_fw_type_repr(uc_fw->type), + fw->size, sizeof(struct uc_css_header)); + err = -ENODATA; + goto fail; + } + + css = (struct uc_css_header *)fw->data; + + /* Check integrity of size values inside CSS header */ + size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - + css->exponent_size_dw) * sizeof(u32); + if (size != sizeof(struct uc_css_header)) { + DRM_WARN("%s: Mismatched firmware header definition\n", + intel_uc_fw_type_repr(uc_fw->type)); + err = -ENOEXEC; + goto fail; + } + + /* uCode size must be calculated from other sizes */ + uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); + + /* now RSA */ + if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) { + DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n", + intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw); + err = -ENOEXEC; + goto fail; + } + uc_fw->rsa_size = css->key_size_dw * sizeof(u32); + + /* At least, it should have header, uCode and RSA. Size of all three. 
*/ + size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size; + if (fw->size < size) { + DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n", + intel_uc_fw_type_repr(uc_fw->type), fw->size, size); + err = -ENOEXEC; + goto fail; + } + + /* Get version numbers from the CSS header */ + switch (uc_fw->type) { + case INTEL_UC_FW_TYPE_GUC: + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR, + css->sw_version); + break; + + case INTEL_UC_FW_TYPE_HUC: + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR, + css->sw_version); + break; + + default: + MISSING_CASE(uc_fw->type); + break; + } + + DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); + + if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) { + DRM_NOTE("%s: Skipping firmware version check\n", + intel_uc_fw_type_repr(uc_fw->type)); + } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || + uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { + DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); + err = -ENOEXEC; + goto fail; + } + + obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + DRM_DEBUG_DRIVER("%s fw object_create err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); + goto fail; + } + + uc_fw->obj = obj; + uc_fw->size = fw->size; + uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE; + + release_firmware(fw); + return; + +fail: + uc_fw->status = INTEL_UC_FIRMWARE_MISSING; + + DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); + DRM_INFO("%s: Firmware can be downloaded from %s\n", + intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); + + release_firmware(fw); /* OK even if fw is NULL */ +} + +static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) +{ + struct drm_mm_node *node = &ggtt->uc_fw; + + GEM_BUG_ON(!node->allocated); + GEM_BUG_ON(upper_32_bits(node->start)); + GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); + + return lower_32_bits(node->start); +} + +static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw, + struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj = uc_fw->obj; + struct i915_ggtt *ggtt = gt->ggtt; + struct i915_vma dummy = { + .node.start = uc_fw_ggtt_offset(uc_fw, ggtt), + .node.size = obj->base.size, + .pages = obj->mm.pages, + .vm = &ggtt->vm, + }; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size); + + /* uc_fw->obj cache domains were not controlled across suspend */ + drm_clflush_sg(dummy.pages); + + ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0); +} + +static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw, + struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj = uc_fw->obj; + struct i915_ggtt *ggtt = gt->ggtt; + u64 start = uc_fw_ggtt_offset(uc_fw, ggtt); + + ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); +} + +static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + u32 wopcm_offset, u32 dma_flags) +{ + struct 
intel_uncore *uncore = gt->uncore; + u64 offset; + int ret; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + /* Set the source address for the uCode */ + offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt); + GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000); + intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); + intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset)); + + /* Set the DMA destination */ + intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset); + intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + + /* + * Set the transfer size. The header plus uCode will be copied to WOPCM + * via DMA, excluding any other components + */ + intel_uncore_write_fw(uncore, DMA_COPY_SIZE, + sizeof(struct uc_css_header) + uc_fw->ucode_size); + + /* Start the DMA */ + intel_uncore_write_fw(uncore, DMA_CTRL, + _MASKED_BIT_ENABLE(dma_flags | START_DMA)); + + /* Wait for DMA to finish */ + ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); + if (ret) + dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uncore_read_fw(uncore, DMA_CTRL)); + + /* Disable the bits once DMA is over */ + intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + return ret; +} + +/** + * intel_uc_fw_upload - load uC firmware using custom loader + * @uc_fw: uC firmware + * @gt: the intel_gt structure + * @wopcm_offset: destination offset in wopcm + * @dma_flags: flags for dma ctrl + * + * Loads uC firmware and updates internal flags. + * + * Return: 0 on success, non-zero on failure. + */ +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + u32 wopcm_offset, u32 dma_flags) +{ + int err; + + DRM_DEBUG_DRIVER("%s fw load %s\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); + + /* make sure the status was cleared the last time we reset the uc */ + GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); + + if (!intel_uc_fw_is_available(uc_fw)) + return -ENOEXEC; + /* Call custom loader */ + intel_uc_fw_ggtt_bind(uc_fw, gt); + err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags); + intel_uc_fw_ggtt_unbind(uc_fw, gt); + if (err) + goto fail; + + uc_fw->status = INTEL_UC_FIRMWARE_TRANSFERRED; + DRM_DEBUG_DRIVER("%s fw xfer completed\n", + intel_uc_fw_type_repr(uc_fw->type)); + + DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->path, + uc_fw->major_ver_found, uc_fw->minor_ver_found); + + return 0; + +fail: + uc_fw->status = INTEL_UC_FIRMWARE_FAIL; + DRM_DEBUG_DRIVER("%s fw load failed\n", + intel_uc_fw_type_repr(uc_fw->type)); + + DRM_WARN("%s: Failed to load firmware %s (error %d)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); + + return err; +} + +int intel_uc_fw_init(struct intel_uc_fw *uc_fw) +{ + int err; + + /* this should happen before the load! */ + GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); + + if (!intel_uc_fw_is_available(uc_fw)) + return -ENOEXEC; + + err = i915_gem_object_pin_pages(uc_fw->obj); + if (err) + DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); + + return err; +} + +void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) +{ + if (!intel_uc_fw_is_available(uc_fw)) + return; + + i915_gem_object_unpin_pages(uc_fw->obj); +} + +/** + * intel_uc_fw_cleanup_fetch - cleanup uC firmware + * + * @uc_fw: uC firmware + * + * Cleans up uC firmware by releasing the firmware GEM obj. 
+ */ +void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) +{ + struct drm_i915_gem_object *obj; + + obj = fetch_and_zero(&uc_fw->obj); + if (obj) + i915_gem_object_put(obj); + + uc_fw->status = INTEL_UC_FIRMWARE_SELECTED; +} + +/** + * intel_uc_fw_copy_rsa - copy fw RSA to buffer + * + * @uc_fw: uC firmware + * @dst: dst buffer + * @max_len: max number of bytes to copy + * + * Return: number of copied bytes. + */ +size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len) +{ + struct sg_table *pages = uc_fw->obj->mm.pages; + u32 size = min_t(u32, uc_fw->rsa_size, max_len); + u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size; + + GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw)); + + return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset); +} + +/** + * intel_uc_fw_dump - dump information about uC firmware + * @uc_fw: uC firmware + * @p: the &drm_printer + * + * Pretty printer for uC firmware. + */ +void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) +{ + drm_printf(p, "%s firmware: %s\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); + drm_printf(p, "\tstatus: %s\n", + intel_uc_fw_status_repr(uc_fw->status)); + drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, + uc_fw->major_ver_found, uc_fw->minor_ver_found); + drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size); + drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size); +} diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index ff98f8661d72..6b64b8073703 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -25,34 +25,45 @@ #ifndef _INTEL_UC_FW_H_ #define _INTEL_UC_FW_H_ +#include <linux/types.h> +#include "intel_uc_fw_abi.h" +#include "i915_gem.h" + struct drm_printer; struct drm_i915_private; +struct intel_gt; /* Home of GuC, HuC and DMC firmwares */ #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" enum intel_uc_fw_status { - INTEL_UC_FIRMWARE_FAIL = -1, - INTEL_UC_FIRMWARE_NONE = 0, - INTEL_UC_FIRMWARE_PENDING, - INTEL_UC_FIRMWARE_SUCCESS + INTEL_UC_FIRMWARE_FAIL = -3, /* failed to xfer or init/auth the fw */ + INTEL_UC_FIRMWARE_MISSING = -2, /* blob not found on the system */ + INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */ + INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */ + INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */ + INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */ + INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */ + INTEL_UC_FIRMWARE_RUNNING /* init/auth done */ }; enum intel_uc_fw_type { - INTEL_UC_FW_TYPE_GUC, + INTEL_UC_FW_TYPE_GUC = 0, INTEL_UC_FW_TYPE_HUC }; +#define INTEL_UC_FW_NUM_TYPES 2 /* * This structure encapsulates all the data needed during the process * of fetching, caching, and loading the firmware image into the uC. 
*/ struct intel_uc_fw { + enum intel_uc_fw_type type; + enum intel_uc_fw_status status; const char *path; + bool user_overridden; size_t size; struct drm_i915_gem_object *obj; - enum intel_uc_fw_status fetch_status; - enum intel_uc_fw_status load_status; /* * The firmware build process will generate a version header file with major and @@ -64,13 +75,8 @@ struct intel_uc_fw { u16 major_ver_found; u16 minor_ver_found; - enum intel_uc_fw_type type; - u32 header_size; - u32 header_offset; u32 rsa_size; - u32 rsa_offset; u32 ucode_size; - u32 ucode_offset; }; static inline @@ -79,12 +85,20 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) switch (status) { case INTEL_UC_FIRMWARE_FAIL: return "FAIL"; - case INTEL_UC_FIRMWARE_NONE: - return "NONE"; - case INTEL_UC_FIRMWARE_PENDING: - return "PENDING"; - case INTEL_UC_FIRMWARE_SUCCESS: - return "SUCCESS"; + case INTEL_UC_FIRMWARE_MISSING: + return "MISSING"; + case INTEL_UC_FIRMWARE_NOT_SUPPORTED: + return "N/A"; + case INTEL_UC_FIRMWARE_UNINITIALIZED: + return "UNINITIALIZED"; + case INTEL_UC_FIRMWARE_SELECTED: + return "SELECTED"; + case INTEL_UC_FIRMWARE_AVAILABLE: + return "AVAILABLE"; + case INTEL_UC_FIRMWARE_TRANSFERRED: + return "TRANSFERRED"; + case INTEL_UC_FIRMWARE_RUNNING: + return "RUNNING"; } return "<invalid>"; } @@ -100,30 +114,43 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type) return "uC"; } -static inline -void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, - enum intel_uc_fw_type type) +static inline enum intel_uc_fw_status +__intel_uc_fw_status(struct intel_uc_fw *uc_fw) { - uc_fw->path = NULL; - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE; - uc_fw->load_status = INTEL_UC_FIRMWARE_NONE; - uc_fw->type = type; + /* shouldn't call this before checking hw/blob availability */ + GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); + return uc_fw->status; } -static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw) +static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw) { - return uc_fw->path != NULL; + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE; } static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) { - return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED; +} + +static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING; +} + +static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED; +} + +static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) +{ + return uc_fw->user_overridden; } static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) { if (intel_uc_fw_is_loaded(uc_fw)) - uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; + uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE; } /** @@ -136,20 +163,23 @@ static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) */ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) { - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + if (!intel_uc_fw_is_available(uc_fw)) return 0; - return uc_fw->header_size + uc_fw->ucode_size; + return sizeof(struct uc_css_header) + uc_fw->ucode_size; } -void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, - struct intel_uc_fw *uc_fw); +void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_type type, + struct 
drm_i915_private *i915); +void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, + struct drm_i915_private *i915); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, - int (*xfer)(struct intel_uc_fw *uc_fw)); +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + u32 wopcm_offset, u32 dma_flags); int intel_uc_fw_init(struct intel_uc_fw *uc_fw); void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw); +size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len); void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h new file mode 100644 index 000000000000..ae58e8a8c53b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef _INTEL_UC_FW_ABI_H +#define _INTEL_UC_FW_ABI_H + +#include <linux/types.h> +#include <linux/build_bug.h> + +/** + * DOC: Firmware Layout + * + * The GuC/HuC firmware layout looks like this:: + * + * +======================================================================+ + * | Firmware blob | + * +===============+===============+============+============+============+ + * | CSS header | uCode | RSA key | modulus | exponent | + * +===============+===============+============+============+============+ + * <-header size-> <---header size continued -----------> + * <--- size -----------------------------------------------------------> + * <-key size-> + * <-mod size-> + * <-exp size-> + * + * The firmware may or may not have modulus key and exponent data. The header, + * uCode and RSA signature are must-have components that will be used by the driver. + * The length of each component, in dwords, can be found in the header. + * In the case that the modulus and exponent are not present in the fw, a.k.a. a + * truncated image, the length values still appear in the header. + * + * The driver will do some basic fw size validation based on the following rules: + * + * 1. Header, uCode and RSA are must-have components. + * 2. All firmware components, if present, are in the sequence illustrated + * in the layout table above. + * 3. The length of each component can be found in the header, in dwords. + * 4. The modulus and exponent key are not required by the driver. They may not + * appear in the fw, in which case the driver will load a truncated firmware. + * + * The only difference between GuC and HuC firmwares is how the version + * information is saved. + */ + +struct uc_css_header { + u32 module_type; + /* + * header_size includes all non-uCode bits, including css_header, rsa + * key, modulus key and exponent data. 
+ */ + u32 header_size_dw; + u32 header_version; + u32 module_id; + u32 module_vendor; + u32 date; +#define CSS_DATE_DAY (0xFF << 0) +#define CSS_DATE_MONTH (0xFF << 8) +#define CSS_DATE_YEAR (0xFFFF << 16) + u32 size_dw; /* uCode plus header_size_dw */ + u32 key_size_dw; + u32 modulus_size_dw; + u32 exponent_size_dw; + u32 time; +#define CSS_TIME_HOUR (0xFF << 0) +#define CSS_DATE_MIN (0xFF << 8) +#define CSS_DATE_SEC (0xFFFF << 16) + char username[8]; + char buildnumber[12]; + u32 sw_version; +#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) +#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) +#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) +#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) +#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) + u32 reserved[14]; + u32 header_info; +} __packed; +static_assert(sizeof(struct uc_css_header) == 128); + +#endif /* _INTEL_UC_FW_ABI_H */ diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c index 6ca8584cd64c..371f7a60c987 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -103,17 +103,9 @@ static int ring_doorbell_nop(struct intel_guc_client *client) /* * Basic client sanity check, handy to validate create_clients. */ -static int validate_client(struct intel_guc_client *client, - int client_priority, - bool is_preempt_client) +static int validate_client(struct intel_guc_client *client, int client_priority) { - struct drm_i915_private *dev_priv = guc_to_i915(client->guc); - struct i915_gem_context *ctx_owner = is_preempt_client ? - dev_priv->preempt_context : dev_priv->kernel_context; - - if (client->owner != ctx_owner || - client->engines != INTEL_INFO(dev_priv)->engine_mask || - client->priority != client_priority || + if (client->priority != client_priority || client->doorbell_id == GUC_DOORBELL_INVALID) return -EINVAL; else @@ -142,11 +134,11 @@ static int igt_guc_clients(void *args) struct intel_guc *guc; int err = 0; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GT_UC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - guc = &dev_priv->guc; + guc = &dev_priv->gt.uc.guc; if (!guc) { pr_err("No guc object!\n"); err = -EINVAL; @@ -163,7 +155,7 @@ static int igt_guc_clients(void *args) */ guc_clients_disable(guc); guc_clients_destroy(guc); - if (guc->execbuf_client || guc->preempt_client) { + if (guc->execbuf_client) { pr_err("guc_clients_destroy lied!\n"); err = -EINVAL; goto unlock; @@ -177,24 +169,14 @@ static int igt_guc_clients(void *args) GEM_BUG_ON(!guc->execbuf_client); err = validate_client(guc->execbuf_client, - GUC_CLIENT_PRIORITY_KMD_NORMAL, false); + GUC_CLIENT_PRIORITY_KMD_NORMAL); if (err) { pr_err("execbug client validation failed\n"); goto out; } - if (guc->preempt_client) { - err = validate_client(guc->preempt_client, - GUC_CLIENT_PRIORITY_KMD_HIGH, true); - if (err) { - pr_err("preempt client validation failed\n"); - goto out; - } - } - - /* each client should now have reserved a doorbell */ - if (!has_doorbell(guc->execbuf_client) || - (guc->preempt_client && !has_doorbell(guc->preempt_client))) { + /* the client should now have reserved a doorbell */ + if (!has_doorbell(guc->execbuf_client)) { pr_err("guc_clients_create didn't reserve doorbells\n"); err = -EINVAL; goto out; @@ -204,8 +186,7 @@ static int igt_guc_clients(void *args) guc_clients_enable(guc); /* each client should now have received a doorbell */ - if (!client_doorbell_in_sync(guc->execbuf_client) || - 
!client_doorbell_in_sync(guc->preempt_client)) { + if (!client_doorbell_in_sync(guc->execbuf_client)) { pr_err("failed to initialize the doorbells\n"); err = -EINVAL; goto out; @@ -245,11 +226,11 @@ static int igt_guc_doorbells(void *arg) int i, err = 0; u16 db_id; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GT_UC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - guc = &dev_priv->guc; + guc = &dev_priv->gt.uc.guc; if (!guc) { pr_err("No guc object!\n"); err = -EINVAL; @@ -261,10 +242,7 @@ static int igt_guc_doorbells(void *arg) goto unlock; for (i = 0; i < ATTEMPTS; i++) { - clients[i] = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->engine_mask, - i % GUC_CLIENT_PRIORITY_NUM, - dev_priv->kernel_context); + clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM); if (!clients[i]) { pr_err("[%d] No guc client\n", i); @@ -300,8 +278,7 @@ static int igt_guc_doorbells(void *arg) goto out; } - err = validate_client(clients[i], - i % GUC_CLIENT_PRIORITY_NUM, false); + err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM); if (err) { pr_err("[%d] client_alloc sanity check failed!\n", i); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 6ea88270c818..b09dc315e2da 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload) gma_head == gma_tail) return 0; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; @@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.workload = workload; s.is_ctx_wa = true; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 65e847392aea..8bb292b01271 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 53115bdae12b..4b04af569c05 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2141,11 +2141,20 @@ static int 
emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; unsigned long index = off >> info->gtt_entry_size_shift; + unsigned long gma; struct intel_gvt_gtt_entry e; if (bytes != 4 && bytes != 8) return -EINVAL; + gma = index << I915_GTT_PAGE_SHIFT; + if (!intel_gvt_ggtt_validate_range(vgpu, + gma, 1 << I915_GTT_PAGE_SHIFT)) { + gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma); + memset(p_data, 0, bytes); + return 0; + } + ggtt_get_guest_entry(ggtt_mm, &e, index); memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), bytes); diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 42d0394f0de2..88789316807d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -205,17 +205,18 @@ struct intel_vgpu_gtt { struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; }; -extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); -extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); +int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); +void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); -extern int intel_gvt_init_gtt(struct intel_gvt *gvt); +int intel_gvt_init_gtt(struct intel_gvt *gvt); void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu); -extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); +void intel_gvt_clean_gtt(struct intel_gvt *gvt); -extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, - int page_table_level, void *root_entry); +struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, + int page_table_level, + void *root_entry); struct intel_vgpu_oos_page { struct intel_vgpu_ppgtt_spt *spt; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 144301b778df..23aa3e50cbf8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1911,6 +1911,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; + } else if (entry->size != size) { + /* the same gfn with different size: unmap and re-map */ + gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); + __gvt_cache_remove_entry(vgpu, entry); + + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); + if (ret) + goto err_unlock; + + ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); + if (ret) + goto err_unmap; } else { kref_get(&entry->ref); *dma_addr = entry->dma_addr; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 2144fb46d0e1..f40524b0e300 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) wa_ctx->indirect_ctx.shadow_va = NULL; } -static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, - struct i915_gem_context *ctx) +static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, + struct i915_gem_context *ctx) { struct intel_vgpu_mm *mm = workload->shadow_mm; struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); int i = 0; - if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) - return -EINVAL; - if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { px_dma(ppgtt->pd) = 
mm->ppgtt_mm.shadow_pdps[0]; } else { @@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } - - return 0; } static int @@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) static int prepare_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + int ring = workload->ring_id; int ret = 0; ret = intel_vgpu_pin_mm(workload->shadow_mm); @@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload) return ret; } + if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || + !workload->shadow_mm->ppgtt_mm.shadowed) { + gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); + return -EINVAL; + } + update_shadow_pdps(workload); + set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context); + ret = intel_vgpu_sync_oos_pages(workload->vgpu); if (ret) { gvt_vgpu_err("fail to vgpu sync oos pages\n"); @@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_vgpu_submission *s = &vgpu->submission; struct i915_request *rq; int ring_id = workload->ring_id; int ret; @@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); - ret = set_context_ppgtt_from_shadow(workload, - s->shadow[ring_id]->gem_context); - if (ret < 0) { - gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); - goto err_req; - } - ret = intel_gvt_workload_req_alloc(workload); if (ret) goto err_req; @@ -990,6 +987,7 @@ static int workload_thread(void *priv) int ret; bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm; kfree(p); @@ -1013,6 +1011,8 @@ static int workload_thread(void *priv) workload->ring_id, workload, workload->vgpu->id); + intel_runtime_pm_get(rpm); + gvt_dbg_sched("ring id %d will dispatch workload %p\n", workload->ring_id, workload); @@ -1042,6 +1042,7 @@ complete: intel_uncore_forcewake_put(&gvt->dev_priv->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put_unchecked(rpm); if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); } @@ -1156,7 +1157,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); - i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm)); + i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm)); for_each_engine(engine, vgpu->gvt->dev_priv, id) intel_context_unpin(s->shadow[id]); @@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); + if (!intel_gvt_ggtt_validate_range(vgpu, start, + _RING_CTL_BUF_SIZE(ctl))) { + gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start); + return ERR_PTR(-EINVAL); + } + workload = alloc_workload(vgpu); if (IS_ERR(workload)) return workload; @@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, workload->wa_ctx.indirect_ctx.size = (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * CACHELINE_BYTES; + + if (workload->wa_ctx.indirect_ctx.size != 0) { + if 
(!intel_gvt_ggtt_validate_range(vgpu, + workload->wa_ctx.indirect_ctx.guest_gma, + workload->wa_ctx.indirect_ctx.size)) { + kmem_cache_free(s->workloads, workload); + gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", + workload->wa_ctx.indirect_ctx.guest_gma); + return ERR_PTR(-EINVAL); + } + } + workload->wa_ctx.per_ctx.guest_gma = per_ctx & PER_CTX_ADDR_MASK; workload->wa_ctx.per_ctx.valid = per_ctx & 1; + if (workload->wa_ctx.per_ctx.valid) { + if (!intel_gvt_ggtt_validate_range(vgpu, + workload->wa_ctx.per_ctx.guest_gma, + CACHELINE_BYTES)) { + kmem_cache_free(s->workloads, workload); + gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", + workload->wa_ctx.per_ctx.guest_gma); + return ERR_PTR(-EINVAL); + } + } } gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c index a3deed692b9c..fe552e877e09 100644 --- a/drivers/gpu/drm/i915/gvt/trace_points.c +++ b/drivers/gpu/drm/i915/gvt/trace_points.c @@ -28,8 +28,6 @@ * */ -#include "trace.h" - #ifndef __CHECKER__ #define CREATE_TRACE_POINTS #include "trace.h" diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 293e5bcc4b6c..d32db8a4db5c 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -4,6 +4,8 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/debugobjects.h> + #include "gt/intel_engine_pm.h" #include "i915_drv.h" @@ -31,49 +33,108 @@ struct active_node { u64 timeline; }; -static void -__active_park(struct i915_active *ref) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS) + +static void *active_debug_hint(void *addr) { - struct active_node *it, *n; + struct i915_active *ref = addr; - rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - GEM_BUG_ON(i915_active_request_isset(&it->base)); - kmem_cache_free(global.slab_cache, it); - } - ref->tree = RB_ROOT; + return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; +} + +static struct debug_obj_descr active_debug_desc = { + .name = "i915_active", + .debug_hint = active_debug_hint, +}; + +static void debug_active_init(struct i915_active *ref) +{ + debug_object_init(ref, &active_debug_desc); } +static void debug_active_activate(struct i915_active *ref) +{ + debug_object_activate(ref, &active_debug_desc); +} + +static void debug_active_deactivate(struct i915_active *ref) +{ + debug_object_deactivate(ref, &active_debug_desc); +} + +static void debug_active_fini(struct i915_active *ref) +{ + debug_object_free(ref, &active_debug_desc); +} + +static void debug_active_assert(struct i915_active *ref) +{ + debug_object_assert_init(ref, &active_debug_desc); +} + +#else + +static inline void debug_active_init(struct i915_active *ref) { } +static inline void debug_active_activate(struct i915_active *ref) { } +static inline void debug_active_deactivate(struct i915_active *ref) { } +static inline void debug_active_fini(struct i915_active *ref) { } +static inline void debug_active_assert(struct i915_active *ref) { } + +#endif + static void __active_retire(struct i915_active *ref) { - GEM_BUG_ON(!ref->count); - if (--ref->count) - return; + struct active_node *it, *n; + struct rb_root root; + bool retire = false; + + lockdep_assert_held(&ref->mutex); + + /* return the unused nodes to our slabcache -- flushing the allocator */ + if (atomic_dec_and_test(&ref->count)) { + debug_active_deactivate(ref); + root = ref->tree; + ref->tree = RB_ROOT; + ref->cache = NULL; + retire = 
true; + } - /* return the unused nodes to our slabcache */ - __active_park(ref); + mutex_unlock(&ref->mutex); + if (!retire) + return; ref->retire(ref); + + rbtree_postorder_for_each_entry_safe(it, n, &root, node) { + GEM_BUG_ON(i915_active_request_isset(&it->base)); + kmem_cache_free(global.slab_cache, it); + } } static void -node_retire(struct i915_active_request *base, struct i915_request *rq) +active_retire(struct i915_active *ref) { - __active_retire(container_of(base, struct active_node, base)->ref); + GEM_BUG_ON(!atomic_read(&ref->count)); + if (atomic_add_unless(&ref->count, -1, 1)) + return; + + /* One active may be flushed from inside the acquire of another */ + mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); + __active_retire(ref); } static void -last_retire(struct i915_active_request *base, struct i915_request *rq) +node_retire(struct i915_active_request *base, struct i915_request *rq) { - __active_retire(container_of(base, struct i915_active, last)); + active_retire(container_of(base, struct active_node, base)->ref); } static struct i915_active_request * active_instance(struct i915_active *ref, u64 idx) { - struct active_node *node; + struct active_node *node, *prealloc; struct rb_node **p, *parent; - struct i915_request *old; /* * We track the most recently used timeline to skip a rbtree search @@ -81,20 +142,18 @@ active_instance(struct i915_active *ref, u64 idx) * at all. We can reuse the last slot if it is empty, that is * after the previous activity has been retired, or if it matches the * current timeline. - * - * Note that we allow the timeline to be active simultaneously in - * the rbtree and the last cache. We do this to avoid having - * to search and replace the rbtree element for a new timeline, with - * the cost being that we must be aware that the ref may be retired - * twice for the same timeline (as the older rbtree element will be - * retired before the new request added to last). */ - old = i915_active_request_raw(&ref->last, BKL(ref)); - if (!old || old->fence.context == idx) - goto out; + node = READ_ONCE(ref->cache); + if (node && node->timeline == idx) + return &node->base; - /* Move the currently active fence into the rbtree */ - idx = old->fence.context; + /* Preallocate a replacement, just in case */ + prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); + if (!prealloc) + return NULL; + + mutex_lock(&ref->mutex); + GEM_BUG_ON(i915_active_is_idle(ref)); parent = NULL; p = &ref->tree.rb_node; @@ -102,8 +161,10 @@ active_instance(struct i915_active *ref, u64 idx) parent = *p; node = rb_entry(parent, struct active_node, node); - if (node->timeline == idx) - goto replace; + if (node->timeline == idx) { + kmem_cache_free(global.slab_cache, prealloc); + goto out; + } if (node->timeline < idx) p = &parent->rb_right; @@ -111,17 +172,7 @@ active_instance(struct i915_active *ref, u64 idx) p = &parent->rb_left; } - node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); - - /* kmalloc may retire the ref->last (thanks shrinker)! 
*/ - if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) { - kmem_cache_free(global.slab_cache, node); - goto out; - } - - if (unlikely(!node)) - return ERR_PTR(-ENOMEM); - + node = prealloc; i915_active_request_init(&node->base, NULL, node_retire); node->ref = ref; node->timeline = idx; @@ -129,38 +180,30 @@ active_instance(struct i915_active *ref, u64 idx) rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &ref->tree); -replace: - /* - * Overwrite the previous active slot in the rbtree with last, - * leaving last zeroed. If the previous slot is still active, - * we must be careful as we now only expect to receive one retire - * callback not two, and so much undo the active counting for the - * overwritten slot. - */ - if (i915_active_request_isset(&node->base)) { - /* Retire ourselves from the old rq->active_list */ - __list_del_entry(&node->base.link); - ref->count--; - GEM_BUG_ON(!ref->count); - } - GEM_BUG_ON(list_empty(&ref->last.link)); - list_replace_init(&ref->last.link, &node->base.link); - node->base.request = fetch_and_zero(&ref->last.request); - out: - return &ref->last; + ref->cache = node; + mutex_unlock(&ref->mutex); + + return &node->base; } -void i915_active_init(struct drm_i915_private *i915, - struct i915_active *ref, - void (*retire)(struct i915_active *ref)) +void __i915_active_init(struct drm_i915_private *i915, + struct i915_active *ref, + int (*active)(struct i915_active *ref), + void (*retire)(struct i915_active *ref), + struct lock_class_key *key) { + debug_active_init(ref); + ref->i915 = i915; + ref->flags = 0; + ref->active = active; ref->retire = retire; ref->tree = RB_ROOT; - i915_active_request_init(&ref->last, NULL, last_retire); + ref->cache = NULL; init_llist_head(&ref->barriers); - ref->count = 0; + atomic_set(&ref->count, 0); + __mutex_init(&ref->mutex, "i915_active", key); } int i915_active_ref(struct i915_active *ref, @@ -168,60 +211,123 @@ int i915_active_ref(struct i915_active *ref, struct i915_request *rq) { struct i915_active_request *active; - int err = 0; + int err; /* Prevent reaping in case we malloc/wait while building the tree */ - i915_active_acquire(ref); + err = i915_active_acquire(ref); + if (err) + return err; active = active_instance(ref, timeline); - if (IS_ERR(active)) { - err = PTR_ERR(active); + if (!active) { + err = -ENOMEM; goto out; } if (!i915_active_request_isset(active)) - ref->count++; + atomic_inc(&ref->count); __i915_active_request_set(active, rq); - GEM_BUG_ON(!ref->count); out: i915_active_release(ref); return err; } -bool i915_active_acquire(struct i915_active *ref) +int i915_active_acquire(struct i915_active *ref) { - lockdep_assert_held(BKL(ref)); - return !ref->count++; + int err; + + debug_active_assert(ref); + if (atomic_add_unless(&ref->count, 1, 0)) + return 0; + + err = mutex_lock_interruptible(&ref->mutex); + if (err) + return err; + + if (!atomic_read(&ref->count) && ref->active) + err = ref->active(ref); + if (!err) { + debug_active_activate(ref); + atomic_inc(&ref->count); + } + + mutex_unlock(&ref->mutex); + + return err; } void i915_active_release(struct i915_active *ref) { - lockdep_assert_held(BKL(ref)); - __active_retire(ref); + debug_active_assert(ref); + active_retire(ref); +} + +static void __active_ungrab(struct i915_active *ref) +{ + clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags); +} + +bool i915_active_trygrab(struct i915_active *ref) +{ + debug_active_assert(ref); + + if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)) + return false; + + if 
(!atomic_add_unless(&ref->count, 1, 0)) { + __active_ungrab(ref); + return false; + } + + return true; +} + +void i915_active_ungrab(struct i915_active *ref) +{ + GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)); + + active_retire(ref); + __active_ungrab(ref); } int i915_active_wait(struct i915_active *ref) { struct active_node *it, *n; - int ret = 0; + int err; - if (i915_active_acquire(ref)) - goto out_release; + might_sleep(); + might_lock(&ref->mutex); + + if (i915_active_is_idle(ref)) + return 0; + + err = mutex_lock_interruptible(&ref->mutex); + if (err) + return err; - ret = i915_active_request_retire(&ref->last, BKL(ref)); - if (ret) - goto out_release; + if (!atomic_add_unless(&ref->count, 1, 0)) { + mutex_unlock(&ref->mutex); + return 0; + } rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - ret = i915_active_request_retire(&it->base, BKL(ref)); - if (ret) + err = i915_active_request_retire(&it->base, BKL(ref)); + if (err) break; } -out_release: - i915_active_release(ref); - return ret; + __active_retire(ref); + if (err) + return err; + + if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE)) + return -EINTR; + + if (!i915_active_is_idle(ref)) + return -EBUSY; + + return 0; } int i915_request_await_active_request(struct i915_request *rq, @@ -236,23 +342,24 @@ int i915_request_await_active_request(struct i915_request *rq, int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) { struct active_node *it, *n; - int err = 0; + int err; - /* await allocates and so we need to avoid hitting the shrinker */ - if (i915_active_acquire(ref)) - goto out; /* was idle */ + if (RB_EMPTY_ROOT(&ref->tree)) + return 0; - err = i915_request_await_active_request(rq, &ref->last); + /* await allocates and so we need to avoid hitting the shrinker */ + err = i915_active_acquire(ref); if (err) - goto out; + return err; + mutex_lock(&ref->mutex); rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { err = i915_request_await_active_request(rq, &it->base); if (err) - goto out; + break; } + mutex_unlock(&ref->mutex); -out: i915_active_release(ref); return err; } @@ -260,9 +367,10 @@ out: #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) void i915_active_fini(struct i915_active *ref) { - GEM_BUG_ON(i915_active_request_isset(&ref->last)); + debug_active_fini(ref); GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); - GEM_BUG_ON(ref->count); + GEM_BUG_ON(atomic_read(&ref->count)); + mutex_destroy(&ref->mutex); } #endif @@ -270,12 +378,12 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; + intel_engine_mask_t tmp, mask = engine->mask; struct llist_node *pos, *next; - unsigned long tmp; int err; - GEM_BUG_ON(!engine->mask); - for_each_engine_masked(engine, i915, engine->mask, tmp) { + GEM_BUG_ON(!mask); + for_each_engine_masked(engine, i915, mask, tmp) { struct intel_context *kctx = engine->kernel_context; struct active_node *node; @@ -289,7 +397,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, (void *)engine, node_retire); node->timeline = kctx->ring->timeline->fence_context; node->ref = ref; - ref->count++; + atomic_inc(&ref->count); intel_engine_pm_get(engine); llist_add((struct llist_node *)&node->base.link, @@ -316,8 +424,9 @@ void i915_active_acquire_barrier(struct i915_active *ref) { struct llist_node *pos, *next; - i915_active_acquire(ref); + GEM_BUG_ON(i915_active_is_idle(ref)); + mutex_lock_nested(&ref->mutex, 
SINGLE_DEPTH_NESTING); llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) { struct intel_engine_cs *engine; struct active_node *node; @@ -347,7 +456,7 @@ void i915_active_acquire_barrier(struct i915_active *ref) &engine->barrier_tasks); intel_engine_pm_put(engine); } - i915_active_release(ref); + mutex_unlock(&ref->mutex); } void i915_request_add_barriers(struct i915_request *rq) diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index c14eebf6d074..ba68b077ec6c 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -369,9 +369,16 @@ i915_active_request_retire(struct i915_active_request *active, * synchronisation. */ -void i915_active_init(struct drm_i915_private *i915, - struct i915_active *ref, - void (*retire)(struct i915_active *ref)); +void __i915_active_init(struct drm_i915_private *i915, + struct i915_active *ref, + int (*active)(struct i915_active *ref), + void (*retire)(struct i915_active *ref), + struct lock_class_key *key); +#define i915_active_init(i915, ref, active, retire) do { \ + static struct lock_class_key __key; \ + \ + __i915_active_init(i915, ref, active, retire, &__key); \ +} while (0) int i915_active_ref(struct i915_active *ref, u64 timeline, @@ -384,20 +391,17 @@ int i915_request_await_active(struct i915_request *rq, int i915_request_await_active_request(struct i915_request *rq, struct i915_active_request *active); -bool i915_active_acquire(struct i915_active *ref); - -static inline void i915_active_cancel(struct i915_active *ref) -{ - GEM_BUG_ON(ref->count != 1); - ref->count = 0; -} - +int i915_active_acquire(struct i915_active *ref); void i915_active_release(struct i915_active *ref); +void __i915_active_release_nested(struct i915_active *ref, int subclass); + +bool i915_active_trygrab(struct i915_active *ref); +void i915_active_ungrab(struct i915_active *ref); static inline bool i915_active_is_idle(const struct i915_active *ref) { - return !ref->count; + return !atomic_read(&ref->count); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index c025991b9233..74743dd0d5f0 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -7,7 +7,9 @@ #ifndef _I915_ACTIVE_TYPES_H_ #define _I915_ACTIVE_TYPES_H_ +#include <linux/atomic.h> #include <linux/llist.h> +#include <linux/mutex.h> #include <linux/rbtree.h> #include <linux/rcupdate.h> @@ -24,13 +26,20 @@ struct i915_active_request { i915_active_retire_fn retire; }; +struct active_node; + struct i915_active { struct drm_i915_private *i915; + struct active_node *cache; struct rb_root tree; - struct i915_active_request last; - unsigned int count; + struct mutex mutex; + atomic_t count; + + unsigned long flags; +#define I915_ACTIVE_GRAB_BIT 0 + int (*active)(struct i915_active *ref); void (*retire)(struct i915_active *ref); struct llist_head barriers; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 62cf34db9280..24787bb48c9f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,12 +40,12 @@ #include "gem/i915_gem_context.h" #include "gt/intel_reset.h" +#include "gt/uc/intel_guc_submission.h" #include "i915_debugfs.h" #include "i915_irq.h" #include "intel_csr.h" #include "intel_drv.h" -#include "intel_guc_submission.h" #include "intel_pm.h" #include "intel_sideband.h" @@ -75,11 +75,6 @@ static int i915_capabilities(struct 
seq_file *m, void *data) return 0; } -static char get_active_flag(struct drm_i915_gem_object *obj) -{ - return i915_gem_object_is_active(obj) ? '*' : ' '; -} - static char get_pin_flag(struct drm_i915_gem_object *obj) { return obj->pin_global ? 'p' : ' '; @@ -144,9 +139,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) unsigned int frontbuffer_bits; int pin_count = 0; - seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s", + seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s", &obj->base, - get_active_flag(obj), get_pin_flag(obj), get_tiling_flag(obj), get_global_flag(obj), @@ -1080,17 +1074,16 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, static int i915_hangcheck_info(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_gt *gt = &i915->gt; struct intel_engine_cs *engine; - u64 acthd[I915_NUM_ENGINES]; - struct intel_instdone instdone; intel_wakeref_t wakeref; enum intel_engine_id id; - seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags); - if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) + seq_printf(m, "Reset flags: %lx\n", gt->reset.flags); + if (test_bit(I915_WEDGED, &gt->reset.flags)) seq_puts(m, "\tWedged\n"); - if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) + if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) seq_puts(m, "\tDevice (global) reset in progress\n"); if (!i915_modparams.enable_hangcheck) { @@ -1098,42 +1091,37 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - for_each_engine(engine, dev_priv, id) - acthd[id] = intel_engine_get_active_head(engine); - - intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone); - } - - if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) + if (timer_pending(&gt->hangcheck.work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", - jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - + jiffies_to_msecs(gt->hangcheck.work.timer.expires - jiffies)); - else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) + else if (delayed_work_pending(&gt->hangcheck.work)) seq_puts(m, "Hangcheck active, work pending\n"); else seq_puts(m, "Hangcheck inactive\n"); - seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); + seq_printf(m, "GT active?
%s\n", yesno(gt->awake)); - for_each_engine(engine, dev_priv, id) { - seq_printf(m, "%s: %d ms ago\n", - engine->name, - jiffies_to_msecs(jiffies - - engine->hangcheck.action_timestamp)); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + for_each_engine(engine, i915, id) { + struct intel_instdone instdone; - seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", - (long long)engine->hangcheck.acthd, - (long long)acthd[id]); + seq_printf(m, "%s: %d ms ago\n", + engine->name, + jiffies_to_msecs(jiffies - + engine->hangcheck.action_timestamp)); - if (engine->id == RCS0) { - seq_puts(m, "\tinstdone read =\n"); + seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", + (long long)engine->hangcheck.acthd, + intel_engine_get_active_head(engine)); - i915_instdone_info(dev_priv, m, &instdone); + intel_engine_get_instdone(engine, &instdone); - seq_puts(m, "\tinstdone accu =\n"); + seq_puts(m, "\tinstdone read =\n"); + i915_instdone_info(i915, m, &instdone); - i915_instdone_info(dev_priv, m, + seq_puts(m, "\tinstdone accu =\n"); + i915_instdone_info(i915, m, &engine->hangcheck.instdone); } } @@ -1141,23 +1129,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } -static int i915_reset_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_gpu_error *error = &dev_priv->gpu_error; - struct intel_engine_cs *engine; - enum intel_engine_id id; - - seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error)); - - for_each_engine(engine, dev_priv, id) { - seq_printf(m, "%s = %u\n", engine->name, - i915_reset_engine_count(error, engine)); - } - - return 0; -} - static int ironlake_drpc_info(struct seq_file *m) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -1894,11 +1865,11 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; struct drm_printer p; - if (!HAS_HUC(dev_priv)) + if (!HAS_GT_UC(dev_priv)) return -ENODEV; p = drm_seq_file_printer(m); - intel_uc_fw_dump(&dev_priv->huc.fw, &p); + intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p); with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); @@ -1912,11 +1883,11 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; struct drm_printer p; - if (!HAS_GUC(dev_priv)) + if (!HAS_GT_UC(dev_priv)) return -ENODEV; p = drm_seq_file_printer(m); - intel_uc_fw_dump(&dev_priv->guc.fw, &p); + intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p); with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { u32 tmp = I915_READ(GUC_STATUS); @@ -1959,7 +1930,7 @@ stringify_guc_log_type(enum guc_log_buffer_type type) static void i915_guc_log_info(struct seq_file *m, struct drm_i915_private *dev_priv) { - struct intel_guc_log *log = &dev_priv->guc.log; + struct intel_guc_log *log = &dev_priv->gt.uc.guc.log; enum guc_log_buffer_type type; if (!intel_guc_log_relay_enabled(log)) { @@ -2005,7 +1976,7 @@ static void i915_guc_client_info(struct seq_file *m, static int i915_guc_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_guc *guc = &dev_priv->guc; + const struct intel_guc *guc = &dev_priv->gt.uc.guc; if (!USES_GUC(dev_priv)) return -ENODEV; @@ -2023,11 +1994,6 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); i915_guc_client_info(m, dev_priv, guc->execbuf_client); - if 
(guc->preempt_client) { - seq_printf(m, "\nGuC preempt client @ %p:\n", - guc->preempt_client); - i915_guc_client_info(m, dev_priv, guc->preempt_client); - } /* Add more as required ... */ @@ -2037,9 +2003,8 @@ static int i915_guc_info(struct seq_file *m, void *data) static int i915_guc_stage_pool(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_guc *guc = &dev_priv->guc; + const struct intel_guc *guc = &dev_priv->gt.uc.guc; struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; - struct intel_guc_client *client = guc->execbuf_client; intel_engine_mask_t tmp; int index; @@ -2069,7 +2034,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data) desc->wq_addr, desc->wq_size); seq_putc(m, '\n'); - for_each_engine_masked(engine, dev_priv, client->engines, tmp) { + for_each_engine(engine, dev_priv, tmp) { u32 guc_engine_id = engine->guc_id; struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id]; @@ -2097,13 +2062,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) u32 *log; int i = 0; - if (!HAS_GUC(dev_priv)) + if (!HAS_GT_UC(dev_priv)) return -ENODEV; if (dump_load_err) - obj = dev_priv->guc.load_err_log; - else if (dev_priv->guc.log.vma) - obj = dev_priv->guc.log.vma->obj; + obj = dev_priv->gt.uc.guc.load_err_log; + else if (dev_priv->gt.uc.guc.log.vma) + obj = dev_priv->gt.uc.guc.log.vma->obj; if (!obj) return 0; @@ -2134,7 +2099,7 @@ static int i915_guc_log_level_get(void *data, u64 *val) if (!USES_GUC(dev_priv)) return -ENODEV; - *val = intel_guc_log_get_level(&dev_priv->guc.log); + *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log); return 0; } @@ -2146,7 +2111,7 @@ static int i915_guc_log_level_set(void *data, u64 val) if (!USES_GUC(dev_priv)) return -ENODEV; - return intel_guc_log_set_level(&dev_priv->guc.log, val); + return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val); } DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops, @@ -2160,9 +2125,9 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file) if (!USES_GUC(dev_priv)) return -ENODEV; - file->private_data = &dev_priv->guc.log; + file->private_data = &dev_priv->gt.uc.guc.log; - return intel_guc_log_relay_open(&dev_priv->guc.log); + return intel_guc_log_relay_open(&dev_priv->gt.uc.guc.log); } static ssize_t @@ -2182,7 +2147,7 @@ static int i915_guc_log_relay_release(struct inode *inode, struct file *file) { struct drm_i915_private *dev_priv = inode->i_private; - intel_guc_log_relay_close(&dev_priv->guc.log); + intel_guc_log_relay_close(&dev_priv->gt.uc.guc.log); return 0; } @@ -2485,7 +2450,8 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) for_each_power_domain(power_domain, power_well->desc->domains) seq_printf(m, " %-23s %d\n", - intel_display_power_domain_str(power_domain), + intel_display_power_domain_str(dev_priv, + power_domain), power_domains->domain_use_count[power_domain]); } @@ -2603,6 +2569,25 @@ static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) intel_seq_print_mode(m, 2, mode); } +static void intel_hdcp_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + bool hdcp_cap, hdcp2_cap; + + hdcp_cap = intel_hdcp_capable(intel_connector); + hdcp2_cap = intel_hdcp2_capable(intel_connector); + + if (hdcp_cap) + seq_puts(m, "HDCP1.4 "); + if (hdcp2_cap) + seq_puts(m, "HDCP2.2 "); + + if (!hdcp_cap && !hdcp2_cap) + seq_puts(m, "None"); + + seq_puts(m, "\n"); +} + static void intel_dp_info(struct seq_file *m, struct intel_connector 
*intel_connector) { @@ -2616,6 +2601,10 @@ static void intel_dp_info(struct seq_file *m, drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, &intel_dp->aux); + if (intel_connector->hdcp.shim) { + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + } } static void intel_dp_mst_info(struct seq_file *m, @@ -2639,6 +2628,10 @@ static void intel_hdmi_info(struct seq_file *m, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); + if (intel_connector->hdcp.shim) { + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + } } static void intel_lvds_info(struct seq_file *m, @@ -2966,14 +2959,28 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) static int i915_wa_registers(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); - const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list; - struct i915_wa *wa; - unsigned int i; + struct intel_engine_cs *engine; + enum intel_engine_id id; - seq_printf(m, "Workarounds applied: %u\n", wal->count); - for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", - i915_mmio_reg_offset(wa->reg), wa->val, wa->mask); + for_each_engine(engine, i915, id) { + const struct i915_wa_list *wal = &engine->ctx_wa_list; + const struct i915_wa *wa; + unsigned int count; + + count = wal->count; + if (!count) + continue; + + seq_printf(m, "%s: Workarounds applied: %u\n", + engine->name, count); + + for (wa = wal->list; count--; wa++) + seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", + i915_mmio_reg_offset(wa->reg), + wa->val, wa->mask); + + seq_printf(m, "\n"); + } return 0; } @@ -3620,7 +3627,8 @@ static const struct file_operations i915_cur_wm_latency_fops = { static int i915_wedged_get(void *data, u64 *val) { - int ret = i915_terminally_wedged(data); + struct drm_i915_private *i915 = data; + int ret = intel_gt_terminally_wedged(&i915->gt); switch (ret) { case -EIO: @@ -3640,11 +3648,11 @@ i915_wedged_set(void *data, u64 val) struct drm_i915_private *i915 = data; /* Flush any previous reset before applying for a new one */ - wait_event(i915->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)); + wait_event(i915->gt.reset.queue, + !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags)); - i915_handle_error(i915, val, I915_ERROR_CAPTURE, - "Manually set wedged engine mask = %llx", val); + intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE, + "Manually set wedged engine mask = %llx", val); return 0; } @@ -3687,8 +3695,9 @@ i915_drop_caches_set(void *data, u64 val) val, val & DROP_ALL); if (val & DROP_RESET_ACTIVE && - wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) - i915_gem_set_wedged(i915); + wait_for(intel_engines_are_idle(&i915->gt), + I915_IDLE_ENGINES_TIMEOUT)) + intel_gt_set_wedged(&i915->gt); /* No need to check and wait for gpu resets, only libdrm auto-restarts * on ioctls on -EAGAIN. 
*/ @@ -3723,8 +3732,8 @@ i915_drop_caches_set(void *data, u64 val) mutex_unlock(&i915->drm.struct_mutex); } - if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915)) - i915_handle_error(i915, ALL_ENGINES, 0, NULL); + if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt)) + intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL); fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) @@ -4087,9 +4096,9 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) /* Synchronize with everything first in case there's been an HPD * storm, but we haven't finished handling it in the kernel yet */ - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); flush_work(&dev_priv->hotplug.dig_port_work); - flush_work(&dev_priv->hotplug.hotplug_work); + flush_delayed_work(&dev_priv->hotplug.hotplug_work); seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", @@ -4379,7 +4388,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_huc_load_status", i915_huc_load_status_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, {"i915_hangcheck_info", i915_hangcheck_info, 0}, - {"i915_reset_info", i915_reset_info, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_emon_status", i915_emon_status, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, @@ -4547,7 +4555,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct intel_connector *intel_connector = to_intel_connector(connector); - bool hdcp_cap, hdcp2_cap; if (connector->status != connector_status_connected) return -ENODEV; @@ -4558,17 +4565,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); - hdcp_cap = intel_hdcp_capable(intel_connector); - hdcp2_cap = intel_hdcp2_capable(intel_connector); - - if (hdcp_cap) - seq_puts(m, "HDCP1.4 "); - if (hdcp2_cap) - seq_puts(m, "HDCP2.2 "); - - if (!hdcp_cap && !hdcp2_cap) - seq_puts(m, "None"); - seq_puts(m, "\n"); + intel_hdcp_info(m, intel_connector); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b99d73c1c4db..f2d3d754af37 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -61,9 +61,11 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_reset.h" #include "gt/intel_workarounds.h" +#include "gt/uc/intel_uc.h" #include "i915_debugfs.h" #include "i915_drv.h" @@ -75,19 +77,18 @@ #include "intel_csr.h" #include "intel_drv.h" #include "intel_pm.h" -#include "intel_uc.h" static struct drm_driver driver; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -static unsigned int i915_load_fail_count; +static unsigned int i915_probe_fail_count; -bool __i915_inject_load_failure(const char *func, int line) +bool __i915_inject_probe_failure(const char *func, int line) { - if (i915_load_fail_count >= i915_modparams.inject_load_failure) + if (i915_probe_fail_count >= i915_modparams.inject_load_failure) return false; - if (++i915_load_fail_count == i915_modparams.inject_load_failure) { + if (++i915_probe_fail_count == i915_modparams.inject_load_failure) { DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", i915_modparams.inject_load_failure, func, line); i915_modparams.inject_load_failure = 0; @@ -99,7 +100,7 @@ bool __i915_inject_load_failure(const char *func, int line) bool 
i915_error_injected(void) { - return i915_load_fail_count && !i915_modparams.inject_load_failure; + return i915_probe_fail_count && !i915_modparams.inject_load_failure; } #endif @@ -219,9 +220,14 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) WARN_ON(!IS_ICELAKE(dev_priv)); return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: + case INTEL_PCH_MCC2_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; + case INTEL_PCH_TGP_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n"); + WARN_ON(!IS_TIGERLAKE(dev_priv)); + return PCH_TGP; default: return PCH_NONE; } @@ -249,7 +255,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) * make an educated guess as to which PCH is really there. */ - if (IS_ELKHARTLAKE(dev_priv)) + if (IS_TIGERLAKE(dev_priv)) + id = INTEL_PCH_TGP_DEVICE_ID_TYPE; + else if (IS_ELKHARTLAKE(dev_priv)) id = INTEL_PCH_MCC_DEVICE_ID_TYPE; else if (IS_ICELAKE(dev_priv)) id = INTEL_PCH_ICP_DEVICE_ID_TYPE; @@ -418,7 +426,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = sseu->min_eu_in_pool; break; case I915_PARAM_HUC_STATUS: - value = intel_huc_check_status(&dev_priv->huc); + value = intel_huc_check_status(&dev_priv->gt.uc.huc); if (value < 0) return value; break; @@ -673,13 +681,13 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { .can_switch = i915_switcheroo_can_switch, }; -static int i915_load_modeset_init(struct drm_device *dev) +static int i915_driver_modeset_probe(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; if (HAS_DISPLAY(dev_priv)) { @@ -749,16 +757,16 @@ static int i915_load_modeset_init(struct drm_device *dev) cleanup_gem: i915_gem_suspend(dev_priv); - i915_gem_fini_hw(dev_priv); - i915_gem_fini(dev_priv); + i915_gem_driver_remove(dev_priv); + i915_gem_driver_release(dev_priv); cleanup_modeset: - intel_modeset_cleanup(dev); + intel_modeset_driver_remove(dev); cleanup_irq: - drm_irq_uninstall(dev); + intel_irq_uninstall(dev_priv); intel_gmbus_teardown(dev_priv); cleanup_csr: intel_csr_ucode_fini(dev_priv); - intel_power_domains_fini_hw(dev_priv); + intel_power_domains_driver_remove(dev_priv); vga_switcheroo_unregister_client(pdev); cleanup_vga_client: vga_client_register(pdev, NULL, NULL, NULL); @@ -840,15 +848,6 @@ out_err: return -ENOMEM; } -static void i915_engines_cleanup(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) - kfree(engine); -} - static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) { destroy_workqueue(dev_priv->hotplug.dp_wq); @@ -882,7 +881,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) } /** - * i915_driver_init_early - setup state not requiring device access + * i915_driver_early_probe - setup state not requiring device access * @dev_priv: device private * * Initialize everything that is a "SW-only" state, that is state not @@ -891,16 +890,16 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) * system memory allocation, setting up device specific attributes and * function hooks not requiring accessing the device. 
*/ -static int i915_driver_init_early(struct drm_i915_private *dev_priv) +static int i915_driver_early_probe(struct drm_i915_private *dev_priv) { int ret = 0; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; intel_device_info_subplatform_init(dev_priv); - intel_uncore_init_early(&dev_priv->uncore); + intel_uncore_init_early(&dev_priv->uncore, dev_priv); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); @@ -920,7 +919,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) ret = i915_workqueues_init(dev_priv); if (ret < 0) - goto err_engines; + return ret; + + intel_gt_init_early(&dev_priv->gt, dev_priv); ret = i915_gem_init_early(dev_priv); if (ret < 0) @@ -930,14 +931,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) intel_detect_pch(dev_priv); intel_wopcm_init_early(&dev_priv->wopcm); - intel_uc_init_early(dev_priv); + intel_uc_init_early(&dev_priv->gt.uc); intel_pm_setup(dev_priv); intel_init_dpio(dev_priv); ret = intel_power_domains_init(dev_priv); if (ret < 0) goto err_uc; intel_irq_init(dev_priv); - intel_hangcheck_init(dev_priv); intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); intel_init_audio_hooks(dev_priv); @@ -948,34 +948,32 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) return 0; err_uc: - intel_uc_cleanup_early(dev_priv); + intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); -err_engines: - i915_engines_cleanup(dev_priv); return ret; } /** - * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() + * i915_driver_late_release - cleanup the setup done in + * i915_driver_early_probe() * @dev_priv: device private */ -static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) +static void i915_driver_late_release(struct drm_i915_private *dev_priv) { intel_irq_fini(dev_priv); intel_power_domains_cleanup(dev_priv); - intel_uc_cleanup_early(dev_priv); + intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); i915_workqueues_cleanup(dev_priv); - i915_engines_cleanup(dev_priv); pm_qos_remove_request(&dev_priv->sb_qos); mutex_destroy(&dev_priv->sb_lock); } /** - * i915_driver_init_mmio - setup device MMIO + * i915_driver_mmio_probe - setup device MMIO * @dev_priv: device private * * Setup minimal device state necessary for MMIO accesses later in the @@ -983,11 +981,11 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) * side effects or exposing the driver via kernel internal or user space * interfaces. 
*/ -static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) +static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) { int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; if (i915_get_bridge_dev(dev_priv)) @@ -1004,7 +1002,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) intel_uncore_prune_mmio_domains(&dev_priv->uncore); - intel_uc_init_mmio(dev_priv); + intel_uc_init_mmio(&dev_priv->gt.uc); ret = intel_engines_init_mmio(dev_priv); if (ret) @@ -1024,11 +1022,12 @@ err_bridge: } /** - * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() + * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe() * @dev_priv: device private */ -static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) +static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { + intel_engines_cleanup(dev_priv); intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); @@ -1520,18 +1519,18 @@ static void edram_detect(struct drm_i915_private *dev_priv) } /** - * i915_driver_init_hw - setup state requiring device access + * i915_driver_hw_probe - setup state requiring device access * @dev_priv: device private * * Setup state that requires accessing the device, but doesn't require * exposing the driver via kernel internal or userspace interfaces. */ -static int i915_driver_init_hw(struct drm_i915_private *dev_priv) +static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; intel_device_info_runtime_init(dev_priv); @@ -1590,6 +1589,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) goto err_ggtt; + intel_gt_init_hw(dev_priv); + ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); @@ -1629,7 +1630,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - intel_uncore_sanitize(dev_priv); + /* BIOS often leaves RC6 enabled, but disable it for hw init */ + intel_sanitize_gt_powersave(dev_priv); intel_gt_init_workarounds(dev_priv); @@ -1677,17 +1679,17 @@ err_msi: pci_disable_msi(pdev); pm_qos_remove_request(&dev_priv->pm_qos); err_ggtt: - i915_ggtt_cleanup_hw(dev_priv); + i915_ggtt_driver_release(dev_priv); err_perf: i915_perf_fini(dev_priv); return ret; } /** - * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() + * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe() * @dev_priv: device private */ -static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) +static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = dev_priv->drm.pdev; @@ -1863,17 +1865,17 @@ static void i915_driver_destroy(struct drm_i915_private *i915) } /** - * i915_driver_load - setup chip and create an initial config + * i915_driver_probe - setup chip and create an initial config * @pdev: PCI device * @ent: matching PCI ID entry * - * The driver load routine has to do several things: + * The driver probe routine has to do several things: * - drive output discovery via intel_modeset_init() * - initialize the memory manager * - allocate initial config memory * - setup the DRM framebuffer with the allocated memory */ -int i915_driver_load(struct pci_dev 
*pdev, const struct pci_device_id *ent) +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct intel_device_info *match_info = (struct intel_device_info *)ent->driver_data; @@ -1892,21 +1894,23 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_fini; - ret = i915_driver_init_early(dev_priv); + ret = i915_driver_early_probe(dev_priv); if (ret < 0) goto out_pci_disable; disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - ret = i915_driver_init_mmio(dev_priv); + i915_detect_vgpu(dev_priv); + + ret = i915_driver_mmio_probe(dev_priv); if (ret < 0) goto out_runtime_pm_put; - ret = i915_driver_init_hw(dev_priv); + ret = i915_driver_hw_probe(dev_priv); if (ret < 0) goto out_cleanup_mmio; - ret = i915_load_modeset_init(&dev_priv->drm); + ret = i915_driver_modeset_probe(&dev_priv->drm); if (ret < 0) goto out_cleanup_hw; @@ -1919,22 +1923,25 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out_cleanup_hw: - i915_driver_cleanup_hw(dev_priv); - i915_ggtt_cleanup_hw(dev_priv); + i915_driver_hw_remove(dev_priv); + i915_ggtt_driver_release(dev_priv); + + /* Paranoia: make sure we have disabled everything before we exit. */ + intel_sanitize_gt_powersave(dev_priv); out_cleanup_mmio: - i915_driver_cleanup_mmio(dev_priv); + i915_driver_mmio_release(dev_priv); out_runtime_pm_put: enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - i915_driver_cleanup_early(dev_priv); + i915_driver_late_release(dev_priv); out_pci_disable: pci_disable_device(pdev); out_fini: - i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); + i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret); i915_driver_destroy(dev_priv); return ret; } -void i915_driver_unload(struct drm_device *dev) +void i915_driver_remove(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; @@ -1948,7 +1955,7 @@ void i915_driver_unload(struct drm_device *dev) * all in-flight requests so that we can quickly unbind the active * resources. */ - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(&dev_priv->gt); /* Flush any external code that still may be under the RCU lock */ synchronize_rcu(); @@ -1957,11 +1964,11 @@ void i915_driver_unload(struct drm_device *dev) drm_atomic_helper_shutdown(dev); - intel_gvt_cleanup(dev_priv); + intel_gvt_driver_remove(dev_priv); - intel_modeset_cleanup(dev); + intel_modeset_driver_remove(dev); - intel_bios_cleanup(dev_priv); + intel_bios_driver_remove(dev_priv); vga_switcheroo_unregister_client(pdev); vga_client_register(pdev, NULL, NULL, NULL); @@ -1969,14 +1976,14 @@ void i915_driver_unload(struct drm_device *dev) intel_csr_ucode_fini(dev_priv); /* Free error state after interrupts are fully disabled. 
*/ - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&dev_priv->gt.hangcheck.work); i915_reset_error_state(dev_priv); - i915_gem_fini_hw(dev_priv); + i915_gem_driver_remove(dev_priv); - intel_power_domains_fini_hw(dev_priv); + intel_power_domains_driver_remove(dev_priv); - i915_driver_cleanup_hw(dev_priv); + i915_driver_hw_remove(dev_priv); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); } @@ -1988,15 +1995,19 @@ static void i915_driver_release(struct drm_device *dev) disable_rpm_wakeref_asserts(rpm); - i915_gem_fini(dev_priv); + i915_gem_driver_release(dev_priv); + + i915_ggtt_driver_release(dev_priv); - i915_ggtt_cleanup_hw(dev_priv); - i915_driver_cleanup_mmio(dev_priv); + /* Paranoia: make sure we have disabled everything before we exit. */ + intel_sanitize_gt_powersave(dev_priv); + + i915_driver_mmio_release(dev_priv); enable_rpm_wakeref_asserts(rpm); - intel_runtime_pm_cleanup(rpm); + intel_runtime_pm_driver_release(rpm); - i915_driver_cleanup_early(dev_priv); + i915_driver_late_release(dev_priv); i915_driver_destroy(dev_priv); } @@ -2189,7 +2200,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) out: enable_rpm_wakeref_asserts(rpm); if (!dev_priv->uncore.user_forcewake.count) - intel_runtime_pm_cleanup(rpm); + intel_runtime_pm_driver_release(rpm); return ret; } @@ -2348,7 +2359,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_uncore_resume_early(&dev_priv->uncore); - i915_check_and_clear_faults(dev_priv); + intel_gt_check_and_clear_faults(&dev_priv->gt); if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) { gen9_sanitize_dc_state(dev_priv); @@ -2357,11 +2368,11 @@ static int i915_drm_resume_early(struct drm_device *dev) hsw_disable_pc8(dev_priv); } - intel_uncore_sanitize(dev_priv); + intel_sanitize_gt_powersave(dev_priv); intel_power_domains_resume(dev_priv); - intel_gt_sanitize(dev_priv, true); + intel_gt_sanitize(&dev_priv->gt, true); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -2384,8 +2395,7 @@ static int i915_resume_switcheroo(struct drm_device *dev) static int i915_pm_prepare(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); if (!dev) { dev_err(kdev, "DRM not initialized, aborting suspend.\n"); @@ -2400,8 +2410,7 @@ static int i915_pm_prepare(struct device *kdev) static int i915_pm_suspend(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); if (!dev) { dev_err(kdev, "DRM not initialized, aborting suspend.\n"); @@ -2895,8 +2904,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv, static int intel_runtime_suspend(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); struct drm_i915_private *dev_priv = to_i915(dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret; @@ -2917,7 +2925,7 @@ static int intel_runtime_suspend(struct device *kdev) */ i915_gem_runtime_suspend(dev_priv); - intel_uc_runtime_suspend(dev_priv); + intel_uc_runtime_suspend(&dev_priv->gt.uc); intel_runtime_pm_disable_interrupts(dev_priv); @@ -2942,9 +2950,9 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_resume(dev_priv); + intel_uc_resume(&dev_priv->gt.uc); - 
i915_gem_init_swizzling(dev_priv); + intel_gt_init_swizzling(&dev_priv->gt); i915_gem_restore_fences(dev_priv); enable_rpm_wakeref_asserts(rpm); @@ -2953,7 +2961,7 @@ static int intel_runtime_suspend(struct device *kdev) } enable_rpm_wakeref_asserts(rpm); - intel_runtime_pm_cleanup(rpm); + intel_runtime_pm_driver_release(rpm); if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) DRM_ERROR("Unclaimed access detected prior to suspending\n"); @@ -2994,8 +3002,7 @@ static int intel_runtime_suspend(struct device *kdev) static int intel_runtime_resume(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); struct drm_i915_private *dev_priv = to_i915(dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret = 0; @@ -3040,13 +3047,13 @@ static int intel_runtime_resume(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_resume(dev_priv); + intel_uc_resume(&dev_priv->gt.uc); /* * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). */ - i915_gem_init_swizzling(dev_priv); + intel_gt_init_swizzling(&dev_priv->gt); i915_gem_restore_fences(dev_priv); /* @@ -3216,6 +3223,9 @@ static struct drm_driver driver = { .gem_prime_export = i915_gem_prime_export, .gem_prime_import = i915_gem_prime_import, + .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, + .get_scanout_position = i915_get_crtc_scanoutpos, + .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_mmap_gtt, .ioctls = i915_ioctls, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bdd596604e93..2e13ecc9cbb6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -72,11 +72,12 @@ #include "gt/intel_lrc.h" #include "gt/intel_engine.h" +#include "gt/intel_gt_types.h" #include "gt/intel_workarounds.h" +#include "gt/uc/intel_uc.h" #include "intel_device_info.h" #include "intel_runtime_pm.h" -#include "intel_uc.h" #include "intel_uncore.h" #include "intel_wakeref.h" #include "intel_wopcm.h" @@ -88,7 +89,7 @@ #include "i915_gpu_error.h" #include "i915_request.h" #include "i915_scheduler.h" -#include "i915_timeline.h" +#include "gt/intel_timeline.h" #include "i915_vma.h" #include "intel_gvt.h" @@ -98,8 +99,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20190619" -#define DRIVER_TIMESTAMP 1560947544 +#define DRIVER_DATE "20190730" +#define DRIVER_TIMESTAMP 1564512624 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -121,20 +122,20 @@ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -bool __i915_inject_load_failure(const char *func, int line); -#define i915_inject_load_failure() \ - __i915_inject_load_failure(__func__, __LINE__) +bool __i915_inject_probe_failure(const char *func, int line); +#define i915_inject_probe_failure() \ + __i915_inject_probe_failure(__func__, __LINE__) bool i915_error_injected(void); #else -#define i915_inject_load_failure() false +#define i915_inject_probe_failure() false #define i915_error_injected() false #endif -#define i915_load_error(i915, fmt, ...) \ +#define i915_probe_error(i915, fmt, ...) \ __i915_printk(i915, i915_error_injected() ? 
KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) @@ -162,7 +163,7 @@ enum hpd_pin { #define HPD_STORM_DEFAULT_THRESHOLD 50 struct i915_hotplug { - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct { unsigned long last_jiffies; @@ -174,6 +175,7 @@ struct i915_hotplug { } state; } stats[HPD_NUM_PINS]; u32 event_bits; + u32 retry_bits; struct delayed_work reenable_work; u32 long_port_mask; @@ -286,14 +288,14 @@ struct drm_i915_display_funcs { enum pipe pipe); int (*get_fifo_size)(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane); - int (*compute_pipe_wm)(struct intel_crtc_state *cstate); - int (*compute_intermediate_wm)(struct intel_crtc_state *newstate); + int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state); + int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state); void (*initial_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *cstate); + struct intel_crtc_state *crtc_state); void (*atomic_update_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *cstate); + struct intel_crtc_state *crtc_state); void (*optimize_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *cstate); + struct intel_crtc_state *crtc_state); int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct intel_atomic_state *state); @@ -306,10 +308,10 @@ struct drm_i915_display_funcs { int (*crtc_compute_clock)(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); void (*crtc_enable)(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state); + struct intel_atomic_state *old_state); void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state); - void (*update_crtcs)(struct drm_atomic_state *state); + struct intel_atomic_state *old_state); + void (*update_crtcs)(struct intel_atomic_state *state); void (*audio_codec_enable)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); @@ -535,6 +537,7 @@ enum intel_pch { PCH_CNP, /* Cannon/Comet Lake PCH */ PCH_ICP, /* Ice Lake PCH */ PCH_MCC, /* Mule Creek Canyon PCH */ + PCH_TGP, /* Tiger Lake PCH */ }; #define QUIRK_LVDS_SSC_DISABLE (1<<1) @@ -781,9 +784,6 @@ struct i915_gem_mm { */ struct vfsmount *gemfs; - /** PPGTT used for aliasing the PPGTT with the GTT */ - struct i915_ppgtt *aliasing_ppgtt; - struct notifier_block oom_notifier; struct notifier_block vmap_notifier; struct shrinker shrinker; @@ -1347,9 +1347,6 @@ struct drm_i915_private { struct intel_wopcm wopcm; - struct intel_huc huc; - struct intel_guc guc; - struct intel_csr csr; struct intel_gmbus gmbus[GMBUS_NUM_PINS]; @@ -1377,8 +1374,6 @@ struct drm_i915_private { struct intel_engine_cs *engine[I915_NUM_ENGINES]; /* Context used internally to idle the GPU and setup initial state */ struct i915_gem_context *kernel_context; - /* Context only to be used for injecting preemption commands */ - struct i915_gem_context *preempt_context; struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; @@ -1402,10 +1397,7 @@ struct drm_i915_private { u32 de_irq_mask[I915_MAX_PIPES]; }; u32 gt_irq_mask; - u32 pm_imr; - u32 pm_ier; u32 pm_rps_events; - u32 pm_guc_events; u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; @@ -1488,8 +1480,6 @@ struct drm_i915_private { DECLARE_HASHTABLE(mm_structs, 7); struct mutex mm_lock; - struct intel_ppat ppat; - /* 
Kernel Modesetting */ struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; @@ -1645,7 +1635,7 @@ struct drm_i915_private { /* * Should be held around atomic WM register writing; also * protects * intel_crtc->wm.active and - * cstate->wm.need_postvbl_update. + * crtc_state->wm.need_postvbl_update. */ struct mutex wm_mutex; @@ -1674,8 +1664,9 @@ struct drm_i915_private { } dram_info; struct intel_bw_info { - int num_planes; - int deratedbw[3]; + unsigned int deratedbw[3]; /* for each QGV point */ + u8 num_qgv_points; + u8 num_planes; } max_bw[6]; struct drm_private_obj bw_obj; @@ -1824,38 +1815,7 @@ struct drm_i915_private { } perf; /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ - struct { - struct i915_gt_timelines { - struct mutex mutex; /* protects list, tainted by GPU */ - struct list_head active_list; - - /* Pack multiple timelines' seqnos into the same page */ - spinlock_t hwsp_lock; - struct list_head hwsp_free_list; - } timelines; - - struct list_head active_rings; - - struct intel_wakeref wakeref; - - struct list_head closed_vma; - spinlock_t closed_lock; /* guards the list of closed_vma */ - - /** - * Is the GPU currently considered idle, or busy executing - * userspace requests? Whilst idle, we allow runtime power - * management to power down the hardware and display clocks. - * In order to reduce the effect on performance, there - * is a slight delay before we do so. - */ - intel_wakeref_t awake; - - struct blocking_notifier_head pm_notifications; - - ktime_t last_init_time; - - struct i915_vma *scratch; - } gt; + struct intel_gt gt; struct { struct notifier_block pm_notifier; @@ -1940,21 +1900,6 @@ static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm) return container_of(wopcm, struct drm_i915_private, wopcm); } -static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) -{ - return container_of(guc, struct drm_i915_private, guc); -} - -static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) -{ - return container_of(huc, struct drm_i915_private, huc); -} - -static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore) -{ - return container_of(uncore, struct drm_i915_private, uncore); -} - /* Simple iterator over all initialised engines */ #define for_each_engine(engine__, dev_priv__, id__) \ for ((id__) = 0; \ @@ -2126,6 +2071,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) #define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) +#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -2322,23 +2268,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) -/* - * For now, anything with a GuC requires uCode loading, and then supports - * command submission once loaded. But these are logically independent - * properties, so we have separate macros to test them. 
- */ -#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc) -#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) -#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) +#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) -/* For now, anything with a GuC has also HuC */ -#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv)) -#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) - -/* Having a GuC is not the same as using a GuC */ -#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv) -#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv) -#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv) +/* Having GuC/HuC is not the same as using GuC/HuC */ +#define USES_GUC(dev_priv) intel_uc_is_using_guc(&(dev_priv)->gt.uc) +#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc) +#define USES_HUC(dev_priv) intel_uc_is_using_huc(&(dev_priv)->gt.uc) #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) @@ -2358,6 +2293,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 +#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880 +#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ @@ -2365,6 +2302,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) #define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC) +#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP) #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) @@ -2425,40 +2363,18 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) #ifdef CONFIG_COMPAT -extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg); +long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #else #define i915_compat_ioctl NULL #endif extern const struct dev_pm_ops i915_pm_ops; -extern int i915_driver_load(struct pci_dev *pdev, - const struct pci_device_id *ent); -extern void i915_driver_unload(struct drm_device *dev); +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +void i915_driver_remove(struct drm_device *dev); -extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); -extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); +void intel_engine_init_hangcheck(struct intel_engine_cs *engine); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); -u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv); - -static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) -{ - unsigned long delay; - - if (unlikely(!i915_modparams.enable_hangcheck)) - return; - - /* Don't continually defer the hangcheck so that it is always run at - * least once after work has been scheduled on any ring. Otherwise, - * we will ignore a hung ring if a second ring is kept busy. 
- */ - - delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); - queue_delayed_work(system_long_wq, - &dev_priv->gpu_error.hangcheck_work, delay); -} - static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt; @@ -2480,18 +2396,17 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv); static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) { - if (!atomic_read(&i915->mm.free_count)) - return; - - /* A single pass should suffice to release all the freed objects (along + /* + * A single pass should suffice to release all the freed objects (along * most call paths) , but be a little more paranoid in that freeing * the objects does take a little amount of time, during which the rcu * callbacks could have added new objects into the freed list, and * armed the work again. */ - do { + while (atomic_read(&i915->mm.free_count)) { + flush_work(&i915->mm.free_work); rcu_barrier(); - } while (flush_work(&i915->mm.free_work)); + } } static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) @@ -2509,6 +2424,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) */ int pass = 3; do { + flush_workqueue(i915->wq); rcu_barrier(); i915_gem_drain_freed_objects(i915); } while (--pass); @@ -2522,7 +2438,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 alignment, u64 flags); -int i915_gem_object_unbind(struct drm_i915_gem_object *obj); +int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + unsigned long flags); +#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); @@ -2545,36 +2463,22 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); -static inline bool __i915_wedged(struct i915_gpu_error *error) -{ - return unlikely(test_bit(I915_WEDGED, &error->flags)); -} - -static inline bool i915_reset_failed(struct drm_i915_private *i915) -{ - return __i915_wedged(&i915->gpu_error); -} - static inline u32 i915_reset_count(struct i915_gpu_error *error) { - return READ_ONCE(error->reset_count); + return atomic_read(&error->reset_count); } static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, struct intel_engine_cs *engine) { - return READ_ONCE(error->reset_engine_count[engine->id]); + return atomic_read(&error->reset_engine_count[engine->uabi_class]); } -void i915_gem_set_wedged(struct drm_i915_private *dev_priv); -bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); - void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); -void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); -void i915_gem_fini_hw(struct drm_i915_private *dev_priv); -void i915_gem_fini(struct drm_i915_private *dev_priv); +void i915_gem_driver_remove(struct drm_i915_private *dev_priv); +void i915_gem_driver_release(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags, long timeout); void i915_gem_suspend(struct drm_i915_private *dev_priv); @@ -2634,16 +2538,6 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, unsigned int flags); int i915_gem_evict_vm(struct i915_address_space *vm); -void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv); - -/* belongs in i915_gem_gtt.h */ -static inline void 
i915_gem_chipset_flush(struct drm_i915_private *dev_priv) -{ - wmb(); - if (INTEL_GEN(dev_priv) < 6) - intel_gtt_chipset_flush(); -} - /* i915_gem_stolen.c */ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, struct drm_mm_node *node, u64 size, @@ -2715,14 +2609,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, bool is_master); /* i915_perf.c */ -extern void i915_perf_init(struct drm_i915_private *dev_priv); -extern void i915_perf_fini(struct drm_i915_private *dev_priv); -extern void i915_perf_register(struct drm_i915_private *dev_priv); -extern void i915_perf_unregister(struct drm_i915_private *dev_priv); +void i915_perf_init(struct drm_i915_private *dev_priv); +void i915_perf_fini(struct drm_i915_private *dev_priv); +void i915_perf_register(struct drm_i915_private *dev_priv); +void i915_perf_unregister(struct drm_i915_private *dev_priv); /* i915_suspend.c */ -extern int i915_save_state(struct drm_i915_private *dev_priv); -extern int i915_restore_state(struct drm_i915_private *dev_priv); +int i915_save_state(struct drm_i915_private *dev_priv); +int i915_restore_state(struct drm_i915_private *dev_priv); /* i915_sysfs.c */ void i915_setup_sysfs(struct drm_i915_private *dev_priv); @@ -2736,23 +2630,22 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) } /* modesetting */ -extern void intel_modeset_init_hw(struct drm_device *dev); -extern int intel_modeset_init(struct drm_device *dev); -extern void intel_modeset_cleanup(struct drm_device *dev); -extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, - bool state); -extern void intel_display_resume(struct drm_device *dev); -extern void i915_redisable_vga(struct drm_i915_private *dev_priv); -extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); -extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); +void intel_modeset_init_hw(struct drm_device *dev); +int intel_modeset_init(struct drm_device *dev); +void intel_modeset_driver_remove(struct drm_device *dev); +int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); +void intel_display_resume(struct drm_device *dev); +void i915_redisable_vga(struct drm_i915_private *dev_priv); +void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); +void intel_init_pch_refclk(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -extern struct intel_display_error_state * +struct intel_display_error_state * intel_display_capture_error_state(struct drm_i915_private *dev_priv); -extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, - struct intel_display_error_state *error); +void intel_display_print_error_state(struct drm_i915_error_state_buf *e, + struct intel_display_error_state *error); #define __I915_REG_OP(op__, dev_priv__, ...) 
\ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) @@ -2828,11 +2721,6 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) return I915_HWS_CSB_WRITE_INDEX; } -static inline u32 i915_scratch_offset(const struct drm_i915_private *i915) -{ - return i915_ggtt_offset(i915->gt.scratch); -} - static inline enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h index 6621595fe74c..a327094de2bd 100644 --- a/drivers/gpu/drm/i915/i915_fixed.h +++ b/drivers/gpu/drm/i915/i915_fixed.h @@ -6,6 +6,11 @@ #ifndef _I915_FIXED_H_ #define _I915_FIXED_H_ +#include <linux/bug.h> +#include <linux/kernel.h> +#include <linux/math64.h> +#include <linux/types.h> + typedef struct { u32 val; } uint_fixed_16_16_t; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 190ad54fb072..65863e955f40 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -46,10 +46,11 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gemfs.h" -#include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_mocs.h" #include "gt/intel_reset.h" +#include "gt/intel_renderstate.h" #include "gt/intel_workarounds.h" #include "i915_drv.h" @@ -101,7 +102,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, return 0; } -int i915_gem_object_unbind(struct drm_i915_gem_object *obj) +int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + unsigned long flags) { struct i915_vma *vma; LIST_HEAD(still_in_list); @@ -116,7 +118,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) list_move_tail(&vma->obj_link, &still_in_list); spin_unlock(&obj->vma.lock); - ret = i915_vma_unbind(vma); + ret = -EBUSY; + if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || + !i915_vma_is_active(vma)) + ret = i915_vma_unbind(vma); spin_lock(&obj->vma.lock); } @@ -142,7 +147,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, return -EFAULT; drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(to_i915(obj->base.dev)); + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); intel_fb_obj_flush(obj, ORIGIN_CPU); return 0; @@ -233,46 +238,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, &args->size, &args->handle); } -void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - - /* - * No actual flushing is required for the GTT write domain for reads - * from the GTT domain. Writes to it "immediately" go to main memory - * as far as we know, so there's no chipset flush. It also doesn't - * land in the GPU render cache. - * - * However, we do have to enforce the order so that all writes through - * the GTT land before any writes to the device, such as updates to - * the GATT itself. - * - * We also have to wait a bit for the writes to land from the GTT. - * An uncached read (i.e. mmio) seems to be ideal for the round-trip - * timing. This issue has only been observed when switching quickly - * between GTT writes and CPU reads from inside the kernel on recent hw, - * and it appears to only affect discrete GTT blocks (i.e. on LLC - * system agents we cannot reproduce this behaviour, until Cannonlake - * that was!). 
- */ - - wmb(); - - if (INTEL_INFO(dev_priv)->has_coherent_ggtt) - return; - - i915_gem_chipset_flush(dev_priv); - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - struct intel_uncore *uncore = &dev_priv->uncore; - - spin_lock_irq(&uncore->lock); - intel_uncore_posting_read_fw(uncore, - RING_HEAD(RENDER_RING_BASE)); - spin_unlock_irq(&uncore->lock); - } -} - static int shmem_pread(struct page *page, int offset, int len, char __user *user_data, bool needs_clflush) @@ -431,11 +396,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, unsigned page_length = PAGE_SIZE - page_offset; page_length = remain < page_length ? remain : page_length; if (node.allocated) { - wmb(); ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), node.start, I915_CACHE_NONE, 0); - wmb(); } else { page_base += offset & PAGE_MASK; } @@ -455,7 +418,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, out_unpin: mutex_lock(&i915->drm.struct_mutex); if (node.allocated) { - wmb(); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); remove_mappable_node(&node); } else { @@ -649,7 +611,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, unsigned int page_length = PAGE_SIZE - page_offset; page_length = remain < page_length ? remain : page_length; if (node.allocated) { - wmb(); /* flush the write before we modify the GGTT */ + /* flush the write before we modify the GGTT */ + intel_gt_flush_ggtt_writes(ggtt->vm.gt); ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), node.start, I915_CACHE_NONE, 0); @@ -678,8 +641,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, i915_gem_object_unlock_fence(obj, fence); out_unpin: mutex_lock(&i915->drm.struct_mutex); + intel_gt_flush_ggtt_writes(ggtt->vm.gt); if (node.allocated) { - wmb(); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); remove_mappable_node(&node); } else { @@ -930,13 +893,13 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) } } -static int wait_for_engines(struct drm_i915_private *i915) +static int wait_for_engines(struct intel_gt *gt) { - if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { - dev_err(i915->drm.dev, + if (wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) { + dev_err(gt->i915->drm.dev, "Failed to idle engines, declaring wedged!\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); return -EIO; } @@ -947,8 +910,8 @@ static long wait_for_timelines(struct drm_i915_private *i915, unsigned int flags, long timeout) { - struct i915_gt_timelines *gt = &i915->gt.timelines; - struct i915_timeline *tl; + struct intel_gt_timelines *gt = &i915->gt.timelines; + struct intel_timeline *tl; mutex_lock(>->mutex); list_for_each_entry(tl, >->active_list, link) { @@ -989,15 +952,15 @@ wait_for_timelines(struct drm_i915_private *i915, int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags, long timeout) { + /* If the device is asleep, we have no requests outstanding */ + if (!READ_ONCE(i915->gt.awake)) + return 0; + GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n", flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", timeout, timeout == MAX_SCHEDULE_TIMEOUT ? 
" (forever)" : "", yesno(i915->gt.awake)); - /* If the device is asleep, we have no requests outstanding */ - if (!READ_ONCE(i915->gt.awake)) - return 0; - timeout = wait_for_timelines(i915, flags, timeout); if (timeout < 0) return timeout; @@ -1007,7 +970,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, lockdep_assert_held(&i915->drm.struct_mutex); - err = wait_for_engines(i915); + err = wait_for_engines(&i915->gt); if (err) return err; @@ -1185,8 +1148,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * back to defaults, recovering from whatever wedged state we left it * in and so worth trying to use the device once more. */ - if (i915_terminally_wedged(i915)) - i915_gem_unset_wedged(i915); + if (intel_gt_is_wedged(&i915->gt)) + intel_gt_unset_wedged(&i915->gt); /* * If we inherit context state from the BIOS or earlier occupants @@ -1196,82 +1159,72 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * it may impact the display and we are uncertain about the stability * of the reset, so this could be applied to even earlier gen. */ - intel_gt_sanitize(i915, false); + intel_gt_sanitize(&i915->gt, false); intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); intel_runtime_pm_put(&i915->runtime_pm, wakeref); } -void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) +static void init_unused_ring(struct intel_gt *gt, u32 base) { - if (INTEL_GEN(dev_priv) < 5 || - dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) - return; - - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | - DISP_TILE_SURFACE_SWIZZLING); - - if (IS_GEN(dev_priv, 5)) - return; - - I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); - if (IS_GEN(dev_priv, 6)) - I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); - else if (IS_GEN(dev_priv, 7)) - I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); - else if (IS_GEN(dev_priv, 8)) - I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); - else - BUG(); -} + struct intel_uncore *uncore = gt->uncore; -static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) -{ - I915_WRITE(RING_CTL(base), 0); - I915_WRITE(RING_HEAD(base), 0); - I915_WRITE(RING_TAIL(base), 0); - I915_WRITE(RING_START(base), 0); + intel_uncore_write(uncore, RING_CTL(base), 0); + intel_uncore_write(uncore, RING_HEAD(base), 0); + intel_uncore_write(uncore, RING_TAIL(base), 0); + intel_uncore_write(uncore, RING_START(base), 0); } -static void init_unused_rings(struct drm_i915_private *dev_priv) +static void init_unused_rings(struct intel_gt *gt) { - if (IS_I830(dev_priv)) { - init_unused_ring(dev_priv, PRB1_BASE); - init_unused_ring(dev_priv, SRB0_BASE); - init_unused_ring(dev_priv, SRB1_BASE); - init_unused_ring(dev_priv, SRB2_BASE); - init_unused_ring(dev_priv, SRB3_BASE); - } else if (IS_GEN(dev_priv, 2)) { - init_unused_ring(dev_priv, SRB0_BASE); - init_unused_ring(dev_priv, SRB1_BASE); - } else if (IS_GEN(dev_priv, 3)) { - init_unused_ring(dev_priv, PRB1_BASE); - init_unused_ring(dev_priv, PRB2_BASE); + struct drm_i915_private *i915 = gt->i915; + + if (IS_I830(i915)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + init_unused_ring(gt, SRB2_BASE); + init_unused_ring(gt, SRB3_BASE); + } else if (IS_GEN(i915, 2)) { + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + } else if (IS_GEN(i915, 3)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, PRB2_BASE); } } -int i915_gem_init_hw(struct drm_i915_private *dev_priv) +int 
i915_gem_init_hw(struct drm_i915_private *i915) { + struct intel_uncore *uncore = &i915->uncore; + struct intel_gt *gt = &i915->gt; int ret; - dev_priv->gt.last_init_time = ktime_get(); + BUG_ON(!i915->kernel_context); + ret = intel_gt_terminally_wedged(gt); + if (ret) + return ret; + + gt->last_init_time = ktime_get(); /* Double layer security blanket, see i915_gem_init() */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) - I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); + if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) + intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); - if (IS_HASWELL(dev_priv)) - I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? - LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); + if (IS_HASWELL(i915)) + intel_uncore_write(uncore, + MI_PREDICATE_RESULT_2, + IS_HSW_GT3(i915) ? + LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); /* Apply the GT workarounds... */ - intel_gt_apply_workarounds(dev_priv); + intel_gt_apply_workarounds(gt); /* ...and determine whether they are sticking. */ - intel_gt_verify_workarounds(dev_priv, "init"); + intel_gt_verify_workarounds(gt, "init"); - i915_gem_init_swizzling(dev_priv); + intel_gt_init_swizzling(gt); /* * At least 830 can leave some of the unused rings @@ -1279,49 +1232,33 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) * will prevent c3 entry. Makes sure all unused rings * are totally idle. */ - init_unused_rings(dev_priv); + init_unused_rings(gt); - BUG_ON(!dev_priv->kernel_context); - ret = i915_terminally_wedged(dev_priv); - if (ret) - goto out; - - ret = i915_ppgtt_init_hw(dev_priv); + ret = i915_ppgtt_init_hw(gt); if (ret) { DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); goto out; } - ret = intel_wopcm_init_hw(&dev_priv->wopcm); + ret = intel_wopcm_init_hw(&i915->wopcm, gt); if (ret) { DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); goto out; } /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(dev_priv); + ret = intel_uc_init_hw(&i915->gt.uc); if (ret) { DRM_ERROR("Enabling uc failed (%d)\n", ret); goto out; } - intel_mocs_init_l3cc_table(dev_priv); - - /* Only when the HW is re-initialised, can we replay the requests */ - ret = intel_engines_resume(dev_priv); - if (ret) - goto cleanup_uc; - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_mocs_init_l3cc_table(gt); - intel_engines_set_scheduler_caps(dev_priv); - return 0; + intel_engines_set_scheduler_caps(i915); -cleanup_uc: - intel_uc_fini_hw(dev_priv); out: - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); return ret; } @@ -1358,10 +1295,24 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) goto err_active; } - err = 0; - if (rq->engine->init_context) - err = rq->engine->init_context(rq); + err = intel_engine_emit_ctx_wa(rq); + if (err) + goto err_rq; + + /* + * Failing to program the MOCS is non-fatal.The system will not + * run at peak performance. So warn the user and carry on. + */ + err = intel_mocs_emit(rq); + if (err) + dev_notice(i915->drm.dev, + "Failed to program MOCS registers; expect performance issues.\n"); + + err = intel_renderstate_emit(rq); + if (err) + goto err_rq; +err_rq: i915_request_add(rq); if (err) goto err_active; @@ -1446,46 +1397,19 @@ err_active: * and ready to be torn-down. 
The quickest way we can accomplish * this is by declaring ourselves wedged. */ - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); goto out_ctx; } static int i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) { - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int ret; - - obj = i915_gem_object_create_stolen(i915, size); - if (!obj) - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate scratch page\n"); - return PTR_ERR(obj); - } - - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_unref; - } - - ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); - if (ret) - goto err_unref; - - i915->gt.scratch = vma; - return 0; - -err_unref: - i915_gem_object_put(obj); - return ret; + return intel_gt_init_scratch(&i915->gt, size); } static void i915_gem_fini_scratch(struct drm_i915_private *i915) { - i915_vma_unpin_and_release(&i915->gt.scratch, 0); + intel_gt_fini_scratch(&i915->gt); } static int intel_engines_verify_workarounds(struct drm_i915_private *i915) @@ -1516,19 +1440,17 @@ int i915_gem_init(struct drm_i915_private *dev_priv) dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); - i915_timelines_init(dev_priv); + intel_timelines_init(dev_priv); ret = i915_gem_init_userptr(dev_priv); if (ret) return ret; - ret = intel_uc_init_misc(dev_priv); - if (ret) - return ret; + intel_uc_fetch_firmwares(&dev_priv->gt.uc); ret = intel_wopcm_init(&dev_priv->wopcm); if (ret) - goto err_uc_misc; + goto err_uc_fw; /* This is just a security blanket to placate dragons. * On some systems, we very sporadically observe that the first TLBs @@ -1539,7 +1461,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - ret = i915_gem_init_ggtt(dev_priv); + ret = i915_init_ggtt(dev_priv); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_unlock; @@ -1572,7 +1494,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) intel_init_gt_powersave(dev_priv); - ret = intel_uc_init(dev_priv); + ret = intel_uc_init(&dev_priv->gt.uc); if (ret) goto err_pm; @@ -1580,6 +1502,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) goto err_uc_init; + /* Only when the HW is re-initialised, can we replay the requests */ + ret = intel_gt_resume(&dev_priv->gt); + if (ret) + goto err_init_hw; + /* * Despite its name intel_init_clock_gating applies both display * clock gating workarounds; GT mmio workarounds and the occasional @@ -1593,20 +1520,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ret = intel_engines_verify_workarounds(dev_priv); if (ret) - goto err_init_hw; + goto err_gt; ret = __intel_engines_record_defaults(dev_priv); if (ret) - goto err_init_hw; + goto err_gt; - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { ret = -ENODEV; - goto err_init_hw; + goto err_gt; } - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { ret = -EIO; - goto err_init_hw; + goto err_gt; } intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); @@ -1620,19 +1547,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * HW as irrevisibly wedged, but keep enough state around that the * driver doesn't explode during runtime. 
*/ -err_init_hw: +err_gt: mutex_unlock(&dev_priv->drm.struct_mutex); - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(&dev_priv->gt); i915_gem_suspend(dev_priv); i915_gem_suspend_late(dev_priv); i915_gem_drain_workqueue(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); - intel_uc_fini_hw(dev_priv); +err_init_hw: + intel_uc_fini_hw(&dev_priv->gt.uc); err_uc_init: - intel_uc_fini(dev_priv); + intel_uc_fini(&dev_priv->gt.uc); err_pm: if (ret != -EIO) { intel_cleanup_gt_powersave(dev_priv); @@ -1648,12 +1576,12 @@ err_unlock: intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); -err_uc_misc: - intel_uc_fini_misc(dev_priv); +err_uc_fw: + intel_uc_cleanup_firmwares(&dev_priv->gt.uc); if (ret != -EIO) { i915_gem_cleanup_userptr(dev_priv); - i915_timelines_fini(dev_priv); + intel_timelines_fini(dev_priv); } if (ret == -EIO) { @@ -1664,10 +1592,10 @@ err_uc_misc: * wedged. But we only want to do this where the GPU is angry, * for all other failure, such as an allocation failure, bail. */ - if (!i915_reset_failed(dev_priv)) { - i915_load_error(dev_priv, - "Failed to initialize GPU, declaring it wedged!\n"); - i915_gem_set_wedged(dev_priv); + if (!intel_gt_is_wedged(&dev_priv->gt)) { + i915_probe_error(dev_priv, + "Failed to initialize GPU, declaring it wedged!\n"); + intel_gt_set_wedged(&dev_priv->gt); } /* Minimal basic recovery for KMS */ @@ -1683,7 +1611,7 @@ err_uc_misc: return ret; } -void i915_gem_fini_hw(struct drm_i915_private *dev_priv) +void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { GEM_BUG_ON(dev_priv->gt.awake); @@ -1696,14 +1624,14 @@ void i915_gem_fini_hw(struct drm_i915_private *dev_priv) i915_gem_drain_workqueue(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); - intel_uc_fini_hw(dev_priv); - intel_uc_fini(dev_priv); + intel_uc_fini_hw(&dev_priv->gt.uc); + intel_uc_fini(&dev_priv->gt.uc); mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_drain_freed_objects(dev_priv); } -void i915_gem_fini(struct drm_i915_private *dev_priv) +void i915_gem_driver_release(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->drm.struct_mutex); intel_engines_cleanup(dev_priv); @@ -1715,9 +1643,9 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) intel_cleanup_gt_powersave(dev_priv); - intel_uc_fini_misc(dev_priv); + intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); - i915_timelines_fini(dev_priv); + intel_timelines_fini(dev_priv); i915_gem_drain_freed_objects(dev_priv); @@ -1746,20 +1674,9 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) { int err; - intel_gt_pm_init(dev_priv); - - INIT_LIST_HEAD(&dev_priv->gt.active_rings); - INIT_LIST_HEAD(&dev_priv->gt.closed_vma); - spin_lock_init(&dev_priv->gt.closed_lock); - i915_gem_init__mm(dev_priv); i915_gem_init__pm(dev_priv); - init_waitqueue_head(&dev_priv->gpu_error.wait_queue); - init_waitqueue_head(&dev_priv->gpu_error.reset_queue); - mutex_init(&dev_priv->gpu_error.wedge_mutex); - init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); - atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); spin_lock_init(&dev_priv->fb_tracking.lock); @@ -1778,7 +1695,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); WARN_ON(dev_priv->mm.shrink_count); - cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); + intel_gt_cleanup_early(&dev_priv->gt); i915_gemfs_fini(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c 
b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index 25a3e4d09a2f..b17f23991253 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -94,34 +94,26 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, list = &pool->cache_list[n]; list_for_each_entry(obj, list, batch_pool_link) { + struct reservation_object *resv = obj->base.resv; + /* The batches are strictly LRU ordered */ - if (i915_gem_object_is_active(obj)) { - struct reservation_object *resv = obj->base.resv; - - if (!reservation_object_test_signaled_rcu(resv, true)) - break; - - i915_retire_requests(pool->engine->i915); - GEM_BUG_ON(i915_gem_object_is_active(obj)); - - /* - * The object is now idle, clear the array of shared - * fences before we add a new request. Although, we - * remain on the same engine, we may be on a different - * timeline and so may continually grow the array, - * trapping a reference to all the old fences, rather - * than replace the existing fence. - */ - if (rcu_access_pointer(resv->fence)) { - reservation_object_lock(resv, NULL); - reservation_object_add_excl_fence(resv, NULL); - reservation_object_unlock(resv); - } + if (!reservation_object_test_signaled_rcu(resv, true)) + break; + + /* + * The object is now idle, clear the array of shared + * fences before we add a new request. Although, we + * remain on the same engine, we may be on a different + * timeline and so may continually grow the array, + * trapping a reference to all the old fences, rather + * than replace the existing fence. + */ + if (rcu_access_pointer(resv->fence)) { + reservation_object_lock(resv, NULL); + reservation_object_add_excl_fence(resv, NULL); + reservation_object_unlock(resv); } - GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv, - true)); - if (obj->base.size >= size) goto found; } diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 0bf53ac1c835..bcac359ec661 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -834,3 +834,35 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt) i915_gem_restore_fences(i915); } + +void intel_gt_init_swizzling(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + + if (INTEL_GEN(i915) < 5 || + i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) + return; + + intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING); + + if (IS_GEN(i915, 5)) + return; + + intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL); + + if (IS_GEN(i915, 6)) + intel_uncore_write(uncore, + ARB_MODE, + _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); + else if (IS_GEN(i915, 7)) + intel_uncore_write(uncore, + ARB_MODE, + _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); + else if (IS_GEN(i915, 8)) + intel_uncore_write(uncore, + GAMTARBMODE, + _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); + else + MISSING_CASE(INTEL_GEN(i915)); +} diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h index d2da98828179..37e4f104f7c0 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h @@ -32,6 +32,7 @@ struct drm_i915_gem_object; struct drm_i915_private; struct i915_ggtt; struct i915_vma; +struct intel_gt; struct sg_table; #define I965_FENCE_PAGE 4096UL @@ -66,4 +67,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, void i915_ggtt_init_fences(struct i915_ggtt *ggtt); +void 
intel_gt_init_swizzling(struct intel_gt *gt); + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8ab820145ea6..c3028722d4e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -36,6 +36,7 @@ #include <drm/i915_drm.h> #include "display/intel_frontbuffer.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_scatterlist.h" @@ -45,6 +46,12 @@ #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) +#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT) +#define DBG(...) trace_printk(__VA_ARGS__) +#else +#define DBG(...) +#endif + /** * DOC: Global GTT views * @@ -106,12 +113,14 @@ * */ +#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt) + static int i915_get_ggtt_vma_pages(struct i915_vma *vma); -static void gen6_ggtt_invalidate(struct drm_i915_private *i915) +static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = &ggtt->vm.i915->uncore; /* * Note that as an uncached mmio write, this will flush the @@ -120,24 +129,19 @@ static void gen6_ggtt_invalidate(struct drm_i915_private *i915) intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); } -static void guc_ggtt_invalidate(struct drm_i915_private *i915) +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = &ggtt->vm.i915->uncore; - gen6_ggtt_invalidate(i915); + gen6_ggtt_invalidate(ggtt); intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); } -static void gmch_ggtt_invalidate(struct drm_i915_private *i915) +static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) { intel_gtt_chipset_flush(); } -static inline void i915_ggtt_invalidate(struct drm_i915_private *i915) -{ - i915->ggtt.invalidate(i915); -} - static int ppgtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 unused) @@ -215,10 +219,10 @@ static u64 gen8_pte_encode(dma_addr_t addr, return pte; } -static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, - const enum i915_cache_level level) +static u64 gen8_pde_encode(const dma_addr_t addr, + const enum i915_cache_level level) { - gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; + u64 pde = _PAGE_PRESENT | _PAGE_RW; pde |= addr; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE; @@ -227,9 +231,6 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, return pde; } -#define gen8_pdpe_encode gen8_pde_encode -#define gen8_pml4e_encode gen8_pde_encode - static u64 snb_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) @@ -482,9 +483,69 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) spin_unlock(&vm->free_pages.lock); } +static void i915_address_space_fini(struct i915_address_space *vm) +{ + spin_lock(&vm->free_pages.lock); + if (pagevec_count(&vm->free_pages.pvec)) + vm_free_pages_release(vm, true); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); + spin_unlock(&vm->free_pages.lock); + + drm_mm_takedown(&vm->mm); + + mutex_destroy(&vm->mutex); +} + +static void ppgtt_destroy_vma(struct i915_address_space *vm) +{ + struct list_head *phases[] = { + &vm->bound_list, + &vm->unbound_list, + NULL, + }, **phase; + + mutex_lock(&vm->i915->drm.struct_mutex); + for (phase = phases; *phase; phase++) { + struct i915_vma *vma, *vn; + + list_for_each_entry_safe(vma, vn, *phase, vm_link) + i915_vma_destroy(vma); + } + mutex_unlock(&vm->i915->drm.struct_mutex); +} + 
+static void __i915_vm_release(struct work_struct *work) +{ + struct i915_address_space *vm = + container_of(work, struct i915_address_space, rcu.work); + + ppgtt_destroy_vma(vm); + + GEM_BUG_ON(!list_empty(&vm->bound_list)); + GEM_BUG_ON(!list_empty(&vm->unbound_list)); + + vm->cleanup(vm); + i915_address_space_fini(vm); + + kfree(vm); +} + +void i915_vm_release(struct kref *kref) +{ + struct i915_address_space *vm = + container_of(kref, struct i915_address_space, ref); + + GEM_BUG_ON(i915_is_ggtt(vm)); + trace_i915_ppgtt_release(vm); + + vm->closed = true; + queue_rcu_work(vm->i915->wq, &vm->rcu); +} + static void i915_address_space_init(struct i915_address_space *vm, int subclass) { kref_init(&vm->ref); + INIT_RCU_WORK(&vm->rcu, __i915_vm_release); /* * The vm->mutex must be reclaim safe (for use in the shrinker). @@ -505,19 +566,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass) INIT_LIST_HEAD(&vm->bound_list); } -static void i915_address_space_fini(struct i915_address_space *vm) -{ - spin_lock(&vm->free_pages.lock); - if (pagevec_count(&vm->free_pages.pvec)) - vm_free_pages_release(vm, true); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); - spin_unlock(&vm->free_pages.lock); - - drm_mm_takedown(&vm->mm); - - mutex_destroy(&vm->mutex); -} - static int __setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p, gfp_t gfp) @@ -554,28 +602,17 @@ static void cleanup_page_dma(struct i915_address_space *vm, #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) -#define setup_px(vm, px) setup_page_dma((vm), px_base(px)) -#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px)) -#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v)) -#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v)) - -static void fill_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p, - const u64 val) +static void +fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) { - u64 * const vaddr = kmap_atomic(p->page); - - memset64(vaddr, val, PAGE_SIZE / sizeof(val)); - - kunmap_atomic(vaddr); + kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); } -static void fill_page_dma_32(struct i915_address_space *vm, - struct i915_page_dma *p, - const u32 v) -{ - fill_page_dma(vm, p, (u64)v << 32 | v); -} +#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64)) +#define fill32_px(px, v) do { \ + u64 v__ = lower_32_bits(v); \ + fill_px((px), v__ << 32 | v__); \ +} while (0) static int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) @@ -602,7 +639,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; do { - int order = get_order(size); + unsigned int order = get_order(size); struct page *page; dma_addr_t addr; @@ -621,8 +658,8 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) if (unlikely(!IS_ALIGNED(addr, size))) goto unmap_page; - vm->scratch_page.page = page; - vm->scratch_page.daddr = addr; + vm->scratch[0].base.page = page; + vm->scratch[0].base.daddr = addr; vm->scratch_order = order; return 0; @@ -641,14 +678,30 @@ skip: static void cleanup_scratch_page(struct i915_address_space *vm) { - struct i915_page_dma *p = &vm->scratch_page; - int order = vm->scratch_order; + struct i915_page_dma *p = px_base(&vm->scratch[0]); + unsigned int order = vm->scratch_order; dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, PCI_DMA_BIDIRECTIONAL); __free_pages(p->page, order); } +static void 
free_scratch(struct i915_address_space *vm) +{ + int i; + + if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */ + return; + + for (i = 1; i <= vm->top; i++) { + if (!px_dma(&vm->scratch[i])) + break; + cleanup_page_dma(vm, px_base(&vm->scratch[i])); + } + + cleanup_scratch_page(vm); +} + static struct i915_page_table *alloc_pt(struct i915_address_space *vm) { struct i915_page_table *pt; @@ -657,50 +710,24 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) if (unlikely(!pt)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_px(vm, pt))) { + if (unlikely(setup_page_dma(vm, &pt->base))) { kfree(pt); return ERR_PTR(-ENOMEM); } atomic_set(&pt->used, 0); - return pt; } -static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) -{ - cleanup_px(vm, pt); - kfree(pt); -} - -static void gen8_initialize_pt(struct i915_address_space *vm, - struct i915_page_table *pt) -{ - fill_px(vm, pt, vm->scratch_pte); -} - -static void gen6_initialize_pt(struct i915_address_space *vm, - struct i915_page_table *pt) -{ - fill32_px(vm, pt, vm->scratch_pte); -} - -static struct i915_page_directory *__alloc_pd(void) +static struct i915_page_directory *__alloc_pd(size_t sz) { struct i915_page_directory *pd; - pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); - + pd = kzalloc(sz, I915_GFP_ALLOW_FAIL); if (unlikely(!pd)) return NULL; - memset(&pd->base, 0, sizeof(pd->base)); - atomic_set(&pd->used, 0); spin_lock_init(&pd->lock); - - /* for safety */ - pd->entry[0] = NULL; - return pd; } @@ -708,11 +735,11 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) { struct i915_page_directory *pd; - pd = __alloc_pd(); + pd = __alloc_pd(sizeof(*pd)); if (unlikely(!pd)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_px(vm, pd))) { + if (unlikely(setup_page_dma(vm, px_base(pd)))) { kfree(pd); return ERR_PTR(-ENOMEM); } @@ -720,36 +747,72 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) return pd; } -static inline bool pd_has_phys_page(const struct i915_page_directory * const pd) +static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) { - return pd->base.page; + cleanup_page_dma(vm, pd); + kfree(pd); } -static void free_pd(struct i915_address_space *vm, - struct i915_page_directory *pd) +#define free_px(vm, px) free_pd(vm, px_base(px)) + +static inline void +write_dma_entry(struct i915_page_dma * const pdma, + const unsigned short idx, + const u64 encoded_entry) { - if (likely(pd_has_phys_page(pd))) - cleanup_px(vm, pd); + u64 * const vaddr = kmap_atomic(pdma->page); - kfree(pd); + vaddr[idx] = encoded_entry; + kunmap_atomic(vaddr); } -static void init_pd_with_page(struct i915_address_space *vm, - struct i915_page_directory * const pd, - struct i915_page_table *pt) +static inline void +__set_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_dma * const to, + u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) +{ + GEM_BUG_ON(atomic_read(px_used(pd)) > ARRAY_SIZE(pd->entry)); + + atomic_inc(px_used(pd)); + pd->entry[idx] = to; + write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC)); +} + +#define set_pd_entry(pd, idx, to) \ + __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode) + +static inline void +clear_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + const struct i915_page_scratch * const scratch) { - fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC)); - memset_p(pd->entry, pt, 512); + 
GEM_BUG_ON(atomic_read(px_used(pd)) == 0); + + write_dma_entry(px_base(pd), idx, scratch->encode); + pd->entry[idx] = NULL; + atomic_dec(px_used(pd)); } -static void init_pd(struct i915_address_space *vm, - struct i915_page_directory * const pd, - struct i915_page_directory * const to) +static bool +release_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_table * const pt, + const struct i915_page_scratch * const scratch) { - GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd)); + bool free = false; + + if (atomic_add_unless(&pt->used, -1, 1)) + return false; + + spin_lock(&pd->lock); + if (atomic_dec_and_test(&pt->used)) { + clear_pd_entry(pd, idx, scratch); + free = true; + } + spin_unlock(&pd->lock); - fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC)); - memset_p(pd->entry, to, 512); + return free; } /* @@ -763,165 +826,305 @@ static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt) ppgtt->pd_dirty_engines = ALL_ENGINES; } -/* Removes entries from a single page table, releasing it if it's empty. - * Caller can use the return value to update higher-level entries. - */ -static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, - struct i915_page_table *pt, - u64 start, u64 length) +static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) { - unsigned int num_entries = gen8_pte_count(start, length); - gen8_pte_t *vaddr; + struct i915_address_space *vm = &ppgtt->vm; + struct drm_i915_private *dev_priv = vm->i915; + enum vgt_g2v_type msg; + int i; - vaddr = kmap_atomic_px(pt); - memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries); - kunmap_atomic(vaddr); + if (create) + atomic_inc(px_used(ppgtt->pd)); /* never remove */ + else + atomic_dec(px_used(ppgtt->pd)); + + if (i915_vm_is_4lvl(vm)) { + const u64 daddr = px_dma(ppgtt->pd); - GEM_BUG_ON(num_entries > atomic_read(&pt->used)); - return !atomic_sub_return(num_entries, &pt->used); + I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); + I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); + + msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); + } else { + for (i = 0; i < GEN8_3LVL_PDPES; i++) { + const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); + + I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); + I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); + } + + msg = (create ? 
VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); + } + + I915_WRITE(vgtif_reg(g2v_notify), msg); + + return 0; } -static void gen8_ppgtt_set_pde(struct i915_address_space *vm, - struct i915_page_directory *pd, - struct i915_page_table *pt, - unsigned int pde) +/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */ +#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */ +#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE)) +#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64)) +#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES)) +#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl)) +#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl)) +#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl)) + +static inline unsigned int +gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) { - gen8_pde_t *vaddr; + const int shift = gen8_pd_shift(lvl); + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); - vaddr = kmap_atomic_px(pd); - vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC); - kunmap_atomic(vaddr); + GEM_BUG_ON(start >= end); + end += ~mask >> gen8_pd_shift(1); + + *idx = i915_pde_index(start, shift); + if ((start ^ end) & mask) + return GEN8_PDES - *idx; + else + return i915_pde_index(end, shift) - *idx; } -static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, - struct i915_page_directory *pd, - u64 start, u64 length) +static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) { - struct i915_page_table *pt; - u32 pde; + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); - gen8_for_each_pde(pt, pd, start, length, pde) { - bool free = false; + GEM_BUG_ON(start >= end); + return (start ^ end) & mask && (start & ~mask) == 0; +} - GEM_BUG_ON(pt == vm->scratch_pt); +static inline unsigned int gen8_pt_count(u64 start, u64 end) +{ + GEM_BUG_ON(start >= end); + if ((start ^ end) >> gen8_pd_shift(1)) + return GEN8_PDES - (start & (GEN8_PDES - 1)); + else + return end - start; +} - if (!gen8_ppgtt_clear_pt(vm, pt, start, length)) - continue; +static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm) +{ + unsigned int shift = __gen8_pte_shift(vm->top); + return (vm->total + (1ull << shift) - 1) >> shift; +} - spin_lock(&pd->lock); - if (!atomic_read(&pt->used)) { - gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde); - pd->entry[pde] = vm->scratch_pt; +static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, + struct i915_page_directory *pd, + int count, int lvl) +{ + if (lvl) { + void **pde = pd->entry; - GEM_BUG_ON(!atomic_read(&pd->used)); - atomic_dec(&pd->used); - free = true; - } - spin_unlock(&pd->lock); - if (free) - free_pt(vm, pt); + do { + if (!*pde) + continue; + + __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1); + } while (pde++, --count); } - return !atomic_read(&pd->used); + free_px(vm, pd); } -static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp, - struct i915_page_directory *pd, - unsigned int pdpe) +static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { - gen8_ppgtt_pdpe_t *vaddr; + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (!pd_has_phys_page(pdp)) - return; + if (intel_vgpu_active(vm->i915)) + gen8_ppgtt_notify_vgt(ppgtt, false); - vaddr = kmap_atomic_px(pdp); - vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC); - kunmap_atomic(vaddr); + __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top); + free_scratch(vm); } -/* Removes entries from a single page dir pointer, 
releasing it if it's empty. - * Caller can use the return value to update higher-level entries - */ -static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, - struct i915_page_directory * const pdp, - u64 start, u64 length) +static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 start, const u64 end, int lvl) { - struct i915_page_directory *pd; - unsigned int pdpe; + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + unsigned int idx, len; - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - bool free = false; + len = gen8_pd_range(start, end, lvl--, &idx); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n", + __func__, vm, lvl + 1, start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || len >= atomic_read(px_used(pd))); - GEM_BUG_ON(pd == vm->scratch_pd); - - if (!gen8_ppgtt_clear_pd(vm, pd, start, length)) + do { + struct i915_page_table *pt = pd->entry[idx]; + + if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) && + gen8_pd_contains(start, end, lvl)) { + DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n", + __func__, vm, lvl + 1, idx, start, end); + clear_pd_entry(pd, idx, scratch); + __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl); + start += (u64)I915_PDES << gen8_pd_shift(lvl); continue; + } + + if (lvl) { + start = __gen8_ppgtt_clear(vm, as_pd(pt), + start, end, lvl); + } else { + unsigned int count; + u64 *vaddr; - spin_lock(&pdp->lock); - if (!atomic_read(&pd->used)) { - gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); - pdp->entry[pdpe] = vm->scratch_pd; + count = gen8_pt_count(start, end); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} removing pte\n", + __func__, vm, lvl, start, end, + gen8_pd_index(start, 0), count, + atomic_read(&pt->used)); + GEM_BUG_ON(!count || count >= atomic_read(&pt->used)); + + vaddr = kmap_atomic_px(pt); + memset64(vaddr + gen8_pd_index(start, 0), + vm->scratch[0].encode, + count); + kunmap_atomic(vaddr); - GEM_BUG_ON(!atomic_read(&pdp->used)); - atomic_dec(&pdp->used); - free = true; + atomic_sub(count, &pt->used); + start += count; } - spin_unlock(&pdp->lock); - if (free) - free_pd(vm, pd); - } - return !atomic_read(&pdp->used); + if (release_pd_entry(pd, idx, pt, scratch)) + free_px(vm, pt); + } while (idx++, --len); + + return start; } -static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, - u64 start, u64 length) +static void gen8_ppgtt_clear(struct i915_address_space *vm, + u64 start, u64 length) { - gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length); + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); + + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); + + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + start, start + length, vm->top); } -static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4, - struct i915_page_directory *pdp, - unsigned int pml4e) +static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 * const start, u64 end, int lvl) { - gen8_ppgtt_pml4e_t *vaddr; + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + struct i915_page_table *alloc = NULL; + unsigned int idx, len; + int ret = 0; - vaddr = kmap_atomic_px(pml4); - vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); - kunmap_atomic(vaddr); + len = gen8_pd_range(*start, end, lvl--, 
&idx); + DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n", + __func__, vm, lvl + 1, *start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1)); + + spin_lock(&pd->lock); + GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */ + do { + struct i915_page_table *pt = pd->entry[idx]; + + if (!pt) { + spin_unlock(&pd->lock); + + DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n", + __func__, vm, lvl + 1, idx); + + pt = fetch_and_zero(&alloc); + if (lvl) { + if (!pt) { + pt = &alloc_pd(vm)->pt; + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } + + fill_px(pt, vm->scratch[lvl].encode); + } else { + if (!pt) { + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } + + if (intel_vgpu_active(vm->i915) || + gen8_pt_count(*start, end) < I915_PDES) + fill_px(pt, vm->scratch[lvl].encode); + } + + spin_lock(&pd->lock); + if (likely(!pd->entry[idx])) + set_pd_entry(pd, idx, pt); + else + alloc = pt, pt = pd->entry[idx]; + } + + if (lvl) { + atomic_inc(&pt->used); + spin_unlock(&pd->lock); + + ret = __gen8_ppgtt_alloc(vm, as_pd(pt), + start, end, lvl); + if (unlikely(ret)) { + if (release_pd_entry(pd, idx, pt, scratch)) + free_px(vm, pt); + goto out; + } + + spin_lock(&pd->lock); + atomic_dec(&pt->used); + GEM_BUG_ON(!atomic_read(&pt->used)); + } else { + unsigned int count = gen8_pt_count(*start, end); + + DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} inserting pte\n", + __func__, vm, lvl, *start, end, + gen8_pd_index(*start, 0), count, + atomic_read(&pt->used)); + + atomic_add(count, &pt->used); + GEM_BUG_ON(atomic_read(&pt->used) > I915_PDES); + *start += count; + } + } while (idx++, --len); + spin_unlock(&pd->lock); +out: + if (alloc) + free_px(vm, alloc); + return ret; } -/* Removes entries from a single pml4. - * This is the top-level structure in 4-level page tables used on gen8+. - * Empty entries are always scratch pml4e. 
- */ -static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, - u64 start, u64 length) +static int gen8_ppgtt_alloc(struct i915_address_space *vm, + u64 start, u64 length) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory * const pml4 = ppgtt->pd; - struct i915_page_directory *pdp; - unsigned int pml4e; + u64 from; + int err; - GEM_BUG_ON(!i915_vm_is_4lvl(vm)); + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - bool free = false; - GEM_BUG_ON(pdp == vm->scratch_pdp); + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); + from = start; - if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length)) - continue; + err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, + &start, start + length, vm->top); + if (unlikely(err && from != start)) + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + from, start, vm->top); - spin_lock(&pml4->lock); - if (!atomic_read(&pdp->used)) { - gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - pml4->entry[pml4e] = vm->scratch_pdp; - free = true; - } - spin_unlock(&pml4->lock); - if (free) - free_pd(vm, pdp); - } + return err; } static inline struct sgt_dma { @@ -933,47 +1136,28 @@ static inline struct sgt_dma { return (struct sgt_dma) { sg, addr, addr + sg->length }; } -struct gen8_insert_pte { - u16 pml4e; - u16 pdpe; - u16 pde; - u16 pte; -}; - -static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) -{ - return (struct gen8_insert_pte) { - gen8_pml4e_index(start), - gen8_pdpe_index(start), - gen8_pde_index(start), - gen8_pte_index(start), - }; -} - -static __always_inline bool +static __always_inline u64 gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt, struct i915_page_directory *pdp, struct sgt_dma *iter, - struct gen8_insert_pte *idx, + u64 idx, enum i915_cache_level cache_level, u32 flags) { struct i915_page_directory *pd; const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); gen8_pte_t *vaddr; - bool ret; - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); - pd = i915_pd_entry(pdp, idx->pdpe); - vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); + pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { - vaddr[idx->pte] = pte_encode | iter->dma; + vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; if (iter->dma >= iter->max) { iter->sg = __sg_next(iter->sg); if (!iter->sg) { - ret = false; + idx = 0; break; } @@ -981,30 +1165,22 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt, iter->max = iter->dma + iter->sg->length; } - if (++idx->pte == GEN8_PTES) { - idx->pte = 0; - - if (++idx->pde == I915_PDES) { - idx->pde = 0; - + if (gen8_pd_index(++idx, 0) == 0) { + if (gen8_pd_index(idx, 1) == 0) { /* Limited by sg length for 3lvl */ - if (++idx->pdpe == GEN8_PML4ES_PER_PML4) { - idx->pdpe = 0; - ret = true; + if (gen8_pd_index(idx, 2) == 0) break; - } - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); - pd = pdp->entry[idx->pdpe]; + pd = pdp->entry[gen8_pd_index(idx, 2)]; } kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); } } while (1); kunmap_atomic(vaddr); - return ret; + return idx; } static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, @@ -1014,9 +1190,9 @@ static void 
gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, { struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); - struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); - gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx, + gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, + vma->node.start >> GEN8_PTE_SHIFT, cache_level, flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -1033,39 +1209,38 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, dma_addr_t rem = iter->sg->length; do { - struct gen8_insert_pte idx = gen8_insert_pte(start); struct i915_page_directory *pdp = - i915_pdp_entry(pml4, idx.pml4e); - struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe); - unsigned int page_size; - bool maybe_64K = false; + i915_pd_entry(pml4, __gen8_pte_index(start, 3)); + struct i915_page_directory *pd = + i915_pd_entry(pdp, __gen8_pte_index(start, 2)); gen8_pte_t encode = pte_encode; + unsigned int maybe_64K = -1; + unsigned int page_size; gen8_pte_t *vaddr; - u16 index, max; + u16 index; if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && - rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) { - index = idx.pde; - max = I915_PDES; - page_size = I915_GTT_PAGE_SIZE_2M; - + rem >= I915_GTT_PAGE_SIZE_2M && + !__gen8_pte_index(start, 0)) { + index = __gen8_pte_index(start, 1); encode |= GEN8_PDE_PS_2M; + page_size = I915_GTT_PAGE_SIZE_2M; vaddr = kmap_atomic_px(pd); } else { - struct i915_page_table *pt = i915_pt_entry(pd, idx.pde); + struct i915_page_table *pt = + i915_pt_entry(pd, __gen8_pte_index(start, 1)); - index = idx.pte; - max = GEN8_PTES; + index = __gen8_pte_index(start, 0); page_size = I915_GTT_PAGE_SIZE; if (!index && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (max - index) * I915_GTT_PAGE_SIZE)) - maybe_64K = true; + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)) + maybe_64K = __gen8_pte_index(start, 1); vaddr = kmap_atomic_px(pt); } @@ -1086,16 +1261,16 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, iter->dma = sg_dma_address(iter->sg); iter->max = iter->dma + rem; - if (maybe_64K && index < max && + if (maybe_64K != -1 && index < I915_PDES && !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (max - index) * I915_GTT_PAGE_SIZE))) - maybe_64K = false; + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))) + maybe_64K = -1; if (unlikely(!IS_ALIGNED(iter->dma, page_size))) break; } - } while (rem >= page_size && index < max); + } while (rem >= page_size && index < I915_PDES); kunmap_atomic(vaddr); @@ -1105,14 +1280,14 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, * it and have reached the end of the sg table and we have * enough padding. 
*/ - if (maybe_64K && - (index == max || + if (maybe_64K != -1 && + (index == I915_PDES || (i915_vm_has_scratch_64K(vma->vm) && !iter->sg && IS_ALIGNED(vma->node.start + vma->node.size, I915_GTT_PAGE_SIZE_2M)))) { vaddr = kmap_atomic_px(pd); - vaddr[idx.pde] |= GEN8_PDE_IPS_64K; + vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; kunmap_atomic(vaddr); page_size = I915_GTT_PAGE_SIZE_64K; @@ -1128,9 +1303,8 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { u16 i; - encode = vma->vm->scratch_pte; - vaddr = kmap_atomic_px(i915_pt_entry(pd, - idx.pde)); + encode = vma->vm->scratch[0].encode; + vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); for (i = 1; i < index; i += 16) memset64(vaddr + i, encode, 15); @@ -1156,32 +1330,22 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level, flags); } else { - struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); + u64 idx = vma->node.start >> GEN8_PTE_SHIFT; - while (gen8_ppgtt_insert_pte_entries(ppgtt, - i915_pdp_entry(pml4, idx.pml4e++), - &iter, &idx, cache_level, - flags)) - GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); + while ((idx = gen8_ppgtt_insert_pte_entries(ppgtt, + i915_pd_entry(pml4, gen8_pd_index(idx, 3)), + &iter, idx, cache_level, + flags))) + ; vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } } -static void gen8_free_page_tables(struct i915_address_space *vm, - struct i915_page_directory *pd) -{ - int i; - - for (i = 0; i < I915_PDES; i++) { - if (pd->entry[i] != vm->scratch_pt) - free_pt(vm, pd->entry[i]); - } -} - static int gen8_init_scratch(struct i915_address_space *vm) { int ret; + int i; /* * If everybody agrees to not to write into the scratch page, @@ -1195,10 +1359,8 @@ static int gen8_init_scratch(struct i915_address_space *vm) GEM_BUG_ON(!clone->has_read_only); vm->scratch_order = clone->scratch_order; - vm->scratch_pte = clone->scratch_pte; - vm->scratch_pt = clone->scratch_pt; - vm->scratch_pd = clone->scratch_pd; - vm->scratch_pdp = clone->scratch_pdp; + memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch)); + px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */ return 0; } @@ -1206,373 +1368,88 @@ static int gen8_init_scratch(struct i915_address_space *vm) if (ret) return ret; - vm->scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, - I915_CACHE_LLC, - vm->has_read_only); + vm->scratch[0].encode = + gen8_pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_LLC, vm->has_read_only); - vm->scratch_pt = alloc_pt(vm); - if (IS_ERR(vm->scratch_pt)) { - ret = PTR_ERR(vm->scratch_pt); - goto free_scratch_page; - } + for (i = 1; i <= vm->top; i++) { + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i])))) + goto free_scratch; - vm->scratch_pd = alloc_pd(vm); - if (IS_ERR(vm->scratch_pd)) { - ret = PTR_ERR(vm->scratch_pd); - goto free_pt; - } - - if (i915_vm_is_4lvl(vm)) { - vm->scratch_pdp = alloc_pd(vm); - if (IS_ERR(vm->scratch_pdp)) { - ret = PTR_ERR(vm->scratch_pdp); - goto free_pd; - } + fill_px(&vm->scratch[i], vm->scratch[i - 1].encode); + vm->scratch[i].encode = + gen8_pde_encode(px_dma(&vm->scratch[i]), + I915_CACHE_LLC); } - gen8_initialize_pt(vm, vm->scratch_pt); - init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt); - if (i915_vm_is_4lvl(vm)) - init_pd(vm, vm->scratch_pdp, vm->scratch_pd); - return 0; -free_pd: - free_pd(vm, vm->scratch_pd); -free_pt: - free_pt(vm, vm->scratch_pt); -free_scratch_page: - cleanup_scratch_page(vm); - - return ret; +free_scratch: + 
free_scratch(vm); + return -ENOMEM; } -static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) +static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->vm; - struct drm_i915_private *dev_priv = vm->i915; - enum vgt_g2v_type msg; - int i; + struct i915_page_directory *pd = ppgtt->pd; + unsigned int idx; - if (i915_vm_is_4lvl(vm)) { - const u64 daddr = px_dma(ppgtt->pd); + GEM_BUG_ON(vm->top != 2); + GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES); - I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); + for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { + struct i915_page_directory *pde; - msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); - } else { - for (i = 0; i < GEN8_3LVL_PDPES; i++) { - const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); + pde = alloc_pd(vm); + if (IS_ERR(pde)) + return PTR_ERR(pde); - I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); - } - - msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); + fill_px(pde, vm->scratch[1].encode); + set_pd_entry(pd, idx, pde); + atomic_inc(px_used(pde)); /* keep pinned */ } - I915_WRITE(vgtif_reg(g2v_notify), msg); - return 0; } -static void gen8_free_scratch(struct i915_address_space *vm) -{ - if (!vm->scratch_page.daddr) - return; - - if (i915_vm_is_4lvl(vm)) - free_pd(vm, vm->scratch_pdp); - free_pd(vm, vm->scratch_pd); - free_pt(vm, vm->scratch_pt); - cleanup_scratch_page(vm); -} - -static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, - struct i915_page_directory *pdp) +static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) { - const unsigned int pdpes = i915_pdpes_per_pdp(vm); - int i; - - for (i = 0; i < pdpes; i++) { - if (pdp->entry[i] == vm->scratch_pd) - continue; - - gen8_free_page_tables(vm, pdp->entry[i]); - free_pd(vm, pdp->entry[i]); - } - - free_pd(vm, pdp); -} - -static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) -{ - struct i915_page_directory * const pml4 = ppgtt->pd; - int i; - - for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { - struct i915_page_directory *pdp = i915_pdp_entry(pml4, i); - - if (pdp == ppgtt->vm.scratch_pdp) - continue; - - gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); - } - - free_pd(&ppgtt->vm, pml4); -} - -static void gen8_ppgtt_cleanup(struct i915_address_space *vm) -{ - struct drm_i915_private *i915 = vm->i915; - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct drm_i915_private *i915 = gt->i915; - if (intel_vgpu_active(i915)) - gen8_ppgtt_notify_vgt(ppgtt, false); - - if (i915_vm_is_4lvl(vm)) - gen8_ppgtt_cleanup_4lvl(ppgtt); - else - gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd); - - gen8_free_scratch(vm); -} - -static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, - struct i915_page_directory *pd, - u64 start, u64 length) -{ - struct i915_page_table *pt, *alloc = NULL; - u64 from = start; - unsigned int pde; - int ret = 0; - - spin_lock(&pd->lock); - gen8_for_each_pde(pt, pd, start, length, pde) { - const int count = gen8_pte_count(start, length); - - if (pt == vm->scratch_pt) { - spin_unlock(&pd->lock); - - pt = fetch_and_zero(&alloc); - if (!pt) - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto unwind; - } - - if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) - gen8_initialize_pt(vm, pt); - - spin_lock(&pd->lock); - if (pd->entry[pde] == 
vm->scratch_pt) { - gen8_ppgtt_set_pde(vm, pd, pt, pde); - pd->entry[pde] = pt; - atomic_inc(&pd->used); - } else { - alloc = pt; - pt = pd->entry[pde]; - } - } - - atomic_add(count, &pt->used); - } - spin_unlock(&pd->lock); - goto out; - -unwind: - gen8_ppgtt_clear_pd(vm, pd, from, start - from); -out: - if (alloc) - free_pt(vm, alloc); - return ret; -} - -static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, - struct i915_page_directory *pdp, - u64 start, u64 length) -{ - struct i915_page_directory *pd, *alloc = NULL; - u64 from = start; - unsigned int pdpe; - int ret = 0; - - spin_lock(&pdp->lock); - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - if (pd == vm->scratch_pd) { - spin_unlock(&pdp->lock); - - pd = fetch_and_zero(&alloc); - if (!pd) - pd = alloc_pd(vm); - if (IS_ERR(pd)) { - ret = PTR_ERR(pd); - goto unwind; - } - - init_pd_with_page(vm, pd, vm->scratch_pt); - - spin_lock(&pdp->lock); - if (pdp->entry[pdpe] == vm->scratch_pd) { - gen8_ppgtt_set_pdpe(pdp, pd, pdpe); - pdp->entry[pdpe] = pd; - atomic_inc(&pdp->used); - } else { - alloc = pd; - pd = pdp->entry[pdpe]; - } - } - atomic_inc(&pd->used); - spin_unlock(&pdp->lock); - - ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); - if (unlikely(ret)) - goto unwind_pd; - - spin_lock(&pdp->lock); - atomic_dec(&pd->used); - } - spin_unlock(&pdp->lock); - goto out; - -unwind_pd: - spin_lock(&pdp->lock); - if (atomic_dec_and_test(&pd->used)) { - gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); - GEM_BUG_ON(!atomic_read(&pdp->used)); - atomic_dec(&pdp->used); - free_pd(vm, pd); - } - spin_unlock(&pdp->lock); -unwind: - gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); -out: - if (alloc) - free_pd(vm, alloc); - return ret; -} - -static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, - u64 start, u64 length) -{ - return gen8_ppgtt_alloc_pdp(vm, - i915_vm_to_ppgtt(vm)->pd, start, length); -} - -static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory * const pml4 = ppgtt->pd; - struct i915_page_directory *pdp, *alloc = NULL; - u64 from = start; - int ret = 0; - u32 pml4e; - - spin_lock(&pml4->lock); - gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (pdp == vm->scratch_pdp) { - spin_unlock(&pml4->lock); - - pdp = fetch_and_zero(&alloc); - if (!pdp) - pdp = alloc_pd(vm); - if (IS_ERR(pdp)) { - ret = PTR_ERR(pdp); - goto unwind; - } - - init_pd(vm, pdp, vm->scratch_pd); - - spin_lock(&pml4->lock); - if (pml4->entry[pml4e] == vm->scratch_pdp) { - gen8_ppgtt_set_pml4e(pml4, pdp, pml4e); - pml4->entry[pml4e] = pdp; - } else { - alloc = pdp; - pdp = pml4->entry[pml4e]; - } - } - atomic_inc(&pdp->used); - spin_unlock(&pml4->lock); - - ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); - if (unlikely(ret)) - goto unwind_pdp; + ppgtt->vm.gt = gt; + ppgtt->vm.i915 = i915; + ppgtt->vm.dma = &i915->drm.pdev->dev; + ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); - spin_lock(&pml4->lock); - atomic_dec(&pdp->used); - } - spin_unlock(&pml4->lock); - goto out; + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); -unwind_pdp: - spin_lock(&pml4->lock); - if (atomic_dec_and_test(&pdp->used)) { - gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - free_pd(vm, pdp); - } - spin_unlock(&pml4->lock); -unwind: - gen8_ppgtt_clear_4lvl(vm, from, start - from); -out: - if (alloc) - free_pd(vm, alloc); - return ret; + ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; + ppgtt->vm.vma_ops.unbind_vma = 
ppgtt_unbind_vma; + ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; + ppgtt->vm.vma_ops.clear_pages = clear_pages; } -static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) +static struct i915_page_directory * +gen8_alloc_top_pd(struct i915_address_space *vm) { - struct i915_address_space *vm = &ppgtt->vm; - struct i915_page_directory *pdp = ppgtt->pd; + const unsigned int count = gen8_pd_top_count(vm); struct i915_page_directory *pd; - u64 start = 0, length = ppgtt->vm.total; - u64 from = start; - unsigned int pdpe; - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - pd = alloc_pd(vm); - if (IS_ERR(pd)) - goto unwind; + GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); - init_pd_with_page(vm, pd, vm->scratch_pt); - gen8_ppgtt_set_pdpe(pdp, pd, pdpe); - - atomic_inc(&pdp->used); - } - - atomic_inc(&pdp->used); /* never remove */ - - return 0; + pd = __alloc_pd(offsetof(typeof(*pd), entry[count])); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); -unwind: - start -= from; - gen8_for_each_pdpe(pd, pdp, from, start, pdpe) { - gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); - free_pd(vm, pd); + if (unlikely(setup_page_dma(vm, px_base(pd)))) { + kfree(pd); + return ERR_PTR(-ENOMEM); } - atomic_set(&pdp->used, 0); - return -ENOMEM; -} -static void ppgtt_init(struct drm_i915_private *i915, - struct i915_ppgtt *ppgtt) -{ - ppgtt->vm.i915 = i915; - ppgtt->vm.dma = &i915->drm.pdev->dev; - ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); - - i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); - - ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; - ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->vm.vma_ops.clear_pages = clear_pages; + fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); + atomic_inc(px_used(pd)); /* mark as pinned */ + return pd; } /* @@ -1591,7 +1468,8 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - ppgtt_init(i915, ppgtt); + ppgtt_init(ppgtt, &i915->gt); + ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2; /* * From bdw, there is hw support for read-only pages in the PPGTT. @@ -1611,41 +1489,27 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (err) goto err_free; - ppgtt->pd = __alloc_pd(); - if (!ppgtt->pd) { - err = -ENOMEM; + ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm); + if (IS_ERR(ppgtt->pd)) { + err = PTR_ERR(ppgtt->pd); goto err_free_scratch; } if (i915_vm_is_4lvl(&ppgtt->vm)) { - err = setup_px(&ppgtt->vm, ppgtt->pd); - if (err) - goto err_free_pdp; - - init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp); - - ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; - ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl; } else { - /* - * We don't need to setup dma for top level pdp, only - * for entries. So point entries to scratch. 
- */ - memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd, - GEN8_3LVL_PDPES); - if (intel_vgpu_active(i915)) { err = gen8_preallocate_top_level_pdp(ppgtt); if (err) - goto err_free_pdp; + goto err_free_pd; } - ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl; - ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl; } + ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; + ppgtt->vm.clear_range = gen8_ppgtt_clear; + if (intel_vgpu_active(i915)) gen8_ppgtt_notify_vgt(ppgtt, true); @@ -1653,10 +1517,11 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) return ppgtt; -err_free_pdp: - free_pd(&ppgtt->vm, ppgtt->pd); +err_free_pd: + __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, + gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top); err_free_scratch: - gen8_free_scratch(&ppgtt->vm); + free_scratch(&ppgtt->vm); err_free: kfree(ppgtt); return ERR_PTR(err); @@ -1672,25 +1537,26 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, ppgtt->pd_addr + pde); } -static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) +static void gen7_ppgtt_enable(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; - u32 ecochk, ecobits; enum intel_engine_id id; + u32 ecochk; - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); + intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); - ecochk = I915_READ(GAM_ECOCHK); - if (IS_HASWELL(dev_priv)) { + ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + if (IS_HASWELL(i915)) { ecochk |= ECOCHK_PPGTT_WB_HSW; } else { ecochk |= ECOCHK_PPGTT_LLC_IVB; ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; } - I915_WRITE(GAM_ECOCHK, ecochk); + intel_uncore_write(uncore, GAM_ECOCHK, ecochk); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, i915, id) { /* GFX_MODE is per-ring on gen7+ */ ENGINE_WRITE(engine, RING_MODE_GEN7, @@ -1698,22 +1564,29 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) } } -static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) +static void gen6_ppgtt_enable(struct intel_gt *gt) { - u32 ecochk, gab_ctl, ecobits; + struct intel_uncore *uncore = gt->uncore; - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | - ECOBITS_PPGTT_CACHE64B); + intel_uncore_rmw(uncore, + GAC_ECO_BITS, + 0, + ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); - gab_ctl = I915_READ(GAB_CTL); - I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + intel_uncore_rmw(uncore, + GAB_CTL, + 0, + GAB_CTL_CONT_AFTER_PAGEFAULT); - ecochk = I915_READ(GAM_ECOCHK); - I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); + intel_uncore_rmw(uncore, + GAM_ECOCHK, + 0, + ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); - if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */ - I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */ + intel_uncore_write(uncore, + GFX_MODE, + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); } /* PPGTT support for Sandybdrige/Gen6 and later */ @@ -1722,7 +1595,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, { struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - const gen6_pte_t scratch_pte = vm->scratch_pte; + const gen6_pte_t scratch_pte = vm->scratch[0].encode; unsigned int pde = first_entry / GEN6_PTES; 
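
As an aside on the gen6_ppgtt_enable()/gen7_ppgtt_enable() hunks above (and a few similar ones further down), the patch folds open-coded read/modify/write sequences on registers such as GAC_ECO_BITS, GAB_CTL and GAM_ECOCHK into intel_uncore_rmw() calls. The sketch below is only an illustration of what such a helper amounts to, on the assumption that intel_uncore_rmw(uncore, reg, clear, set) drops the requested bits and then ORs in the new ones; the fake_* names, the register-file array and the FAKE_ECOBITS_* value are placeholders, not the driver's actual API.

/*
 * Illustration only, not the i915 implementation: a plausible
 * read-modify-write helper in the style of
 * intel_uncore_rmw(uncore, reg, clear, set). The register file and
 * the FAKE_ECOBITS_* value are made-up placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_ECOBITS_PPGTT_CACHE64B	(1u << 8)	/* placeholder bit */

static uint32_t mmio[16];	/* stand-in for the MMIO BAR */

static uint32_t fake_rmw(uint32_t *regs, unsigned int reg,
			 uint32_t clear, uint32_t set)
{
	uint32_t old = regs[reg];
	uint32_t val = (old & ~clear) | set;	/* drop "clear" bits, then OR in "set" */

	if (val != old)		/* skip the write when nothing changes */
		regs[reg] = val;

	return old;
}

int main(void)
{
	mmio[0] = 0x13;

	/* clear == 0 degenerates to "OR in these bits", as in the hunks above */
	fake_rmw(mmio, 0, 0, FAKE_ECOBITS_PPGTT_CACHE64B);
	printf("reg 0: %#x\n", mmio[0]);	/* prints 0x113 */

	return 0;
}

With clear == 0, as in every call in these hunks, the helper is just a guarded OR, which matches the removed I915_READ()/I915_WRITE() pairs.
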
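Back in the gen6_ppgtt_clear_range() hunk, the divide by GEN6_PTES above and the matching modulo on the next line split a page-granular offset into a (page directory entry, page table entry) pair; the gen8 hunks earlier generalise the same decomposition to one 9-bit index per level via gen8_pd_shift()/gen8_pd_index() and __gen8_pte_index(). The standalone sketch below is a rough illustration of that arithmetic, not code from the patch: it assumes 4 KiB pages with 512 eight-byte entries per gen8 table (as the GEN8_PAGE_SIZE/GEN8_PDES macros above state) and 1024 entries per gen6 table (a value this hunk does not spell out).

/*
 * Standalone sketch, not part of the patch: decompose a GPU address
 * into per-level page-table indices. Assumes 4 KiB pages, 512 entries
 * per gen8 table (9 index bits per level) and 1024 entries per gen6
 * table; the driver derives these from GEN8_PDES and GEN6_PTES.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SHIFT	12
#define FAKE_GEN8_PDES	512
#define FAKE_GEN6_PTES	1024

/* lvl 0 = PTE, 1 = PDE, 2 = PDPE, 3 = PML4E, mirroring __gen8_pte_index() */
static unsigned int fake_gen8_index(uint64_t addr, int lvl)
{
	return (addr >> (FAKE_PAGE_SHIFT + 9 * lvl)) & (FAKE_GEN8_PDES - 1);
}

int main(void)
{
	uint64_t addr = 0x123456789000ull;	/* arbitrary example offset */
	uint64_t first_entry = addr >> FAKE_PAGE_SHIFT;
	int lvl;

	/* gen6: two-level divide/modulo split, as in the hunk above */
	printf("gen6: pde=%llu pte=%llu\n",
	       (unsigned long long)(first_entry / FAKE_GEN6_PTES),
	       (unsigned long long)(first_entry % FAKE_GEN6_PTES));

	/* gen8: one 9-bit field per level, walked from the top level down */
	for (lvl = 3; lvl >= 0; lvl--)
		printf("gen8: lvl%d index=%u\n", lvl, fake_gen8_index(addr, lvl));

	return 0;
}

On this reading, gen8_pd_index(addr >> GEN8_PTE_SHIFT, lvl) and __gen8_pte_index(addr, lvl) select the same 9-bit field, which is why gen8_ppgtt_clear() and gen8_ppgtt_alloc() can shift start and length right by GEN8_PTE_SHIFT once and then work in page-granular units throughout the walk.
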
unsigned int pte = first_entry % GEN6_PTES; unsigned int num_entries = length / I915_GTT_PAGE_SIZE; @@ -1733,7 +1606,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, const unsigned int count = min(num_entries, GEN6_PTES - pte); gen6_pte_t *vaddr; - GEM_BUG_ON(pt == vm->scratch_pt); + GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); num_entries -= count; @@ -1770,7 +1643,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sgt_dma iter = sgt_dma(vma); gen6_pte_t *vaddr; - GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt); + GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]); vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { @@ -1815,7 +1688,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, gen6_for_each_pde(pt, pd, start, length, pde) { const unsigned int count = gen6_pte_count(start, length); - if (pt == vm->scratch_pt) { + if (px_base(pt) == px_base(&vm->scratch[1])) { spin_unlock(&pd->lock); pt = fetch_and_zero(&alloc); @@ -1826,10 +1699,10 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, goto unwind_out; } - gen6_initialize_pt(vm, pt); + fill32_px(pt, vm->scratch[0].encode); spin_lock(&pd->lock); - if (pd->entry[pde] == vm->scratch_pt) { + if (pd->entry[pde] == &vm->scratch[1]) { pd->entry[pde] = pt; if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { @@ -1848,7 +1721,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, if (flush) { mark_tlbs_dirty(&ppgtt->base); - gen6_ggtt_invalidate(vm->i915); + gen6_ggtt_invalidate(vm->gt->ggtt); } goto out; @@ -1857,7 +1730,7 @@ unwind_out: gen6_ppgtt_clear_range(vm, from, start - from); out: if (alloc) - free_pt(vm, alloc); + free_px(vm, alloc); intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); return ret; } @@ -1866,108 +1739,52 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) { struct i915_address_space * const vm = &ppgtt->base.vm; struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table *unused; - u32 pde; int ret; ret = setup_scratch_page(vm, __GFP_HIGHMEM); if (ret) return ret; - vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr, - I915_CACHE_NONE, - PTE_READ_ONLY); + vm->scratch[0].encode = + vm->pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_NONE, PTE_READ_ONLY); - vm->scratch_pt = alloc_pt(vm); - if (IS_ERR(vm->scratch_pt)) { + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) { cleanup_scratch_page(vm); - return PTR_ERR(vm->scratch_pt); + return -ENOMEM; } - gen6_initialize_pt(vm, vm->scratch_pt); - - gen6_for_all_pdes(unused, pd, pde) - pd->entry[pde] = vm->scratch_pt; + fill32_px(&vm->scratch[1], vm->scratch[0].encode); + memset_p(pd->entry, &vm->scratch[1], I915_PDES); return 0; } -static void gen6_ppgtt_free_scratch(struct i915_address_space *vm) -{ - free_pt(vm, vm->scratch_pt); - cleanup_scratch_page(vm); -} - static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) { struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); struct i915_page_table *pt; u32 pde; gen6_for_all_pdes(pt, pd, pde) - if (pt != ppgtt->base.vm.scratch_pt) - free_pt(&ppgtt->base.vm, pt); -} - -struct gen6_ppgtt_cleanup_work { - struct work_struct base; - struct i915_vma *vma; -}; - -static void gen6_ppgtt_cleanup_work(struct work_struct *wrk) -{ - struct gen6_ppgtt_cleanup_work *work = - container_of(wrk, typeof(*work), base); - /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! 
*/ - struct drm_i915_private *i915 = work->vma->vm->i915; - - mutex_lock(&i915->drm.struct_mutex); - i915_vma_destroy(work->vma); - mutex_unlock(&i915->drm.struct_mutex); - - kfree(work); -} - -static int nop_set_pages(struct i915_vma *vma) -{ - return -ENODEV; + if (px_base(pt) != scratch) + free_px(&ppgtt->base.vm, pt); } -static void nop_clear_pages(struct i915_vma *vma) -{ -} - -static int nop_bind(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 unused) -{ - return -ENODEV; -} - -static void nop_unbind(struct i915_vma *vma) -{ -} - -static const struct i915_vma_ops nop_vma_ops = { - .set_pages = nop_set_pages, - .clear_pages = nop_clear_pages, - .bind_vma = nop_bind, - .unbind_vma = nop_unbind, -}; - static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - struct gen6_ppgtt_cleanup_work *work = ppgtt->work; + struct drm_i915_private *i915 = vm->i915; /* FIXME remove the struct_mutex to bring the locking under control */ - INIT_WORK(&work->base, gen6_ppgtt_cleanup_work); - work->vma = ppgtt->vma; - work->vma->ops = &nop_vma_ops; - schedule_work(&work->base); + mutex_lock(&i915->drm.struct_mutex); + i915_vma_destroy(ppgtt->vma); + mutex_unlock(&i915->drm.struct_mutex); gen6_ppgtt_free_pd(ppgtt); - gen6_ppgtt_free_scratch(vm); + free_scratch(vm); kfree(ppgtt->base.pd); } @@ -1994,14 +1811,14 @@ static int pd_vma_bind(struct i915_vma *vma, struct i915_page_table *pt; unsigned int pde; - ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); + px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; gen6_for_all_pdes(pt, ppgtt->base.pd, pde) gen6_write_pde(ppgtt, pde, pt); mark_tlbs_dirty(&ppgtt->base); - gen6_ggtt_invalidate(ppgtt->base.vm.i915); + gen6_ggtt_invalidate(ggtt); return 0; } @@ -2010,7 +1827,8 @@ static void pd_vma_unbind(struct i915_vma *vma) { struct gen6_ppgtt *ppgtt = vma->private; struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); struct i915_page_table *pt; unsigned int pde; @@ -2019,11 +1837,11 @@ static void pd_vma_unbind(struct i915_vma *vma) /* Free all no longer used page tables */ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { - if (atomic_read(&pt->used) || pt == scratch_pt) + if (px_base(pt) == scratch || atomic_read(&pt->used)) continue; - free_pt(&ppgtt->base.vm, pt); - pd->entry[pde] = scratch_pt; + free_px(&ppgtt->base.vm, pt); + pd->entry[pde] = scratch; } ppgtt->scan_for_unused_pt = false; @@ -2039,7 +1857,7 @@ static const struct i915_vma_ops pd_vma_ops = { static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) { struct drm_i915_private *i915 = ppgtt->base.vm.i915; - struct i915_ggtt *ggtt = &i915->ggtt; + struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; struct i915_vma *vma; GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); @@ -2049,7 +1867,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) if (!vma) return ERR_PTR(-ENOMEM); - i915_active_init(i915, &vma->active, NULL); + i915_active_init(i915, &vma->active, NULL, NULL); INIT_ACTIVE_REQUEST(&vma->last_fence); vma->vm = &ggtt->vm; @@ -2137,7 +1955,8 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - ppgtt_init(i915, &ppgtt->base); + ppgtt_init(&ppgtt->base, 
&i915->gt); + ppgtt->base.vm.top = 1; ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; @@ -2146,16 +1965,10 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL); - if (!ppgtt->work) { - err = -ENOMEM; - goto err_free; - } - - ppgtt->base.pd = __alloc_pd(); + ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd)); if (!ppgtt->base.pd) { err = -ENOMEM; - goto err_work; + goto err_free; } err = gen6_ppgtt_init_scratch(ppgtt); @@ -2171,31 +1984,40 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) return &ppgtt->base; err_scratch: - gen6_ppgtt_free_scratch(&ppgtt->base.vm); + free_scratch(&ppgtt->base.vm); err_pd: kfree(ppgtt->base.pd); -err_work: - kfree(ppgtt->work); err_free: kfree(ppgtt); return ERR_PTR(err); } -static void gtt_write_workarounds(struct drm_i915_private *dev_priv) +static void gtt_write_workarounds(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + /* This function is for gtt related workarounds. This function is * called on driver load and after a GPU reset, so you can place * workarounds here even if they get overwritten by GPU reset. */ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ - if (IS_BROADWELL(dev_priv)) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); - else if (IS_CHERRYVIEW(dev_priv)) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); - else if (IS_GEN9_LP(dev_priv)) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); - else if (INTEL_GEN(dev_priv) >= 9) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); + if (IS_BROADWELL(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); + else if (IS_CHERRYVIEW(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); + else if (IS_GEN9_LP(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); + else if (INTEL_GEN(i915) >= 9) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); /* * To support 64K PTEs we need to first enable the use of the @@ -2208,21 +2030,24 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv) * 32K pages, but we don't currently have any support for it in our * driver. 
*/ - if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) && - INTEL_GEN(dev_priv) <= 10) - I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA, - I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) | - GAMW_ECO_ENABLE_64K_IPS_FIELD); + if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && + INTEL_GEN(i915) <= 10) + intel_uncore_rmw(uncore, + GEN8_GAMW_ECO_DEV_RW_IA, + 0, + GAMW_ECO_ENABLE_64K_IPS_FIELD); } -int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) +int i915_ppgtt_init_hw(struct intel_gt *gt) { - gtt_write_workarounds(dev_priv); + struct drm_i915_private *i915 = gt->i915; - if (IS_GEN(dev_priv, 6)) - gen6_ppgtt_enable(dev_priv); - else if (IS_GEN(dev_priv, 7)) - gen7_ppgtt_enable(dev_priv); + gtt_write_workarounds(gt); + + if (IS_GEN(i915, 6)) + gen6_ppgtt_enable(gt); + else if (IS_GEN(i915, 7)) + gen7_ppgtt_enable(gt); return 0; } @@ -2250,42 +2075,6 @@ i915_ppgtt_create(struct drm_i915_private *i915) return ppgtt; } -static void ppgtt_destroy_vma(struct i915_address_space *vm) -{ - struct list_head *phases[] = { - &vm->bound_list, - &vm->unbound_list, - NULL, - }, **phase; - - vm->closed = true; - for (phase = phases; *phase; phase++) { - struct i915_vma *vma, *vn; - - list_for_each_entry_safe(vma, vn, *phase, vm_link) - i915_vma_destroy(vma); - } -} - -void i915_vm_release(struct kref *kref) -{ - struct i915_address_space *vm = - container_of(kref, struct i915_address_space, ref); - - GEM_BUG_ON(i915_is_ggtt(vm)); - trace_i915_ppgtt_release(vm); - - ppgtt_destroy_vma(vm); - - GEM_BUG_ON(!list_empty(&vm->bound_list)); - GEM_BUG_ON(!list_empty(&vm->unbound_list)); - - vm->cleanup(vm); - i915_address_space_fini(vm); - - kfree(vm); -} - /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is enabled. */ @@ -2297,21 +2086,26 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv) return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active(); } -void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) +static void ggtt_suspend_mappings(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *i915 = ggtt->vm.i915; /* Don't bother messing with faults pre GEN6 as we have little * documentation supporting that it's a good idea. */ - if (INTEL_GEN(dev_priv) < 6) + if (INTEL_GEN(i915) < 6) return; - i915_check_and_clear_faults(dev_priv); + intel_gt_check_and_clear_faults(ggtt->vm.gt); ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - i915_ggtt_invalidate(dev_priv); + ggtt->invalidate(ggtt); +} + +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915) +{ + ggtt_suspend_mappings(&i915->ggtt); } int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, @@ -2357,7 +2151,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } static void gen8_ggtt_insert_entries(struct i915_address_space *vm, @@ -2385,7 +2179,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, * We want to flush the TLBs only after we're certain all the PTE * updates have finished. 
*/ - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } static void gen6_ggtt_insert_page(struct i915_address_space *vm, @@ -2400,7 +2194,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm, iowrite32(vm->pte_encode(addr, level, flags), pte); - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } /* @@ -2426,7 +2220,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, * We want to flush the TLBs only after we're certain all the PTE * updates have finished. */ - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } static void nop_clear_range(struct i915_address_space *vm, @@ -2440,7 +2234,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start / I915_GTT_PAGE_SIZE; unsigned num_entries = length / I915_GTT_PAGE_SIZE; - const gen8_pte_t scratch_pte = vm->scratch_pte; + const gen8_pte_t scratch_pte = vm->scratch[0].encode; gen8_pte_t __iomem *gtt_base = (gen8_pte_t __iomem *)ggtt->gsm + first_entry; const int max_entries = ggtt_total_entries(ggtt) - first_entry; @@ -2565,8 +2359,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->scratch_pte; - + scratch_pte = vm->scratch[0].encode; for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); } @@ -2653,18 +2446,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, pte_flags |= PTE_READ_ONLY; if (flags & I915_VMA_LOCAL_BIND) { - struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt; + struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; if (!(vma->flags & I915_VMA_LOCAL_BIND)) { - ret = appgtt->vm.allocate_va_range(&appgtt->vm, - vma->node.start, - vma->size); + ret = alias->vm.allocate_va_range(&alias->vm, + vma->node.start, + vma->size); if (ret) return ret; } - appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level, - pte_flags); + alias->vm.insert_entries(&alias->vm, vma, + cache_level, pte_flags); } if (flags & I915_VMA_GLOBAL_BIND) { @@ -2692,7 +2485,8 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) } if (vma->flags & I915_VMA_LOCAL_BIND) { - struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm; + struct i915_address_space *vm = + &i915_vm_to_ggtt(vma->vm)->alias->vm; vm->clear_range(vm, vma->node.start, vma->size); } @@ -2749,13 +2543,12 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node, *end -= I915_GTT_PAGE_SIZE; } -static int init_aliasing_ppgtt(struct drm_i915_private *i915) +static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; struct i915_ppgtt *ppgtt; int err; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(ggtt->vm.i915); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -2774,7 +2567,7 @@ static int init_aliasing_ppgtt(struct drm_i915_private *i915) if (err) goto err_ppgtt; - i915->mm.aliasing_ppgtt = ppgtt; + ggtt->alias = ppgtt; GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; @@ -2789,19 +2582,24 @@ err_ppgtt: return err; } -static void fini_aliasing_ppgtt(struct drm_i915_private *i915) +static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; + struct drm_i915_private *i915 = ggtt->vm.i915; struct i915_ppgtt *ppgtt; - ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); + mutex_lock(&i915->drm.struct_mutex); + + ppgtt = fetch_and_zero(&ggtt->alias); if (!ppgtt) - return; + goto 
out; i915_vm_put(&ppgtt->vm); ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + +out: + mutex_unlock(&i915->drm.struct_mutex); } static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) @@ -2830,7 +2628,13 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt) drm_mm_remove_node(&ggtt->uc_fw); } -int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) +static void cleanup_init_ggtt(struct i915_ggtt *ggtt) +{ + ggtt_release_guc_top(ggtt); + drm_mm_remove_node(&ggtt->error_capture); +} + +static int init_ggtt(struct i915_ggtt *ggtt) { /* Let GEM Manage all of the aperture. * @@ -2841,7 +2645,6 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) * aperture. One page should be enough to keep any prefetching inside * of the aperture. */ - struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long hole_start, hole_end; struct drm_mm_node *entry; int ret; @@ -2853,9 +2656,9 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) * why. */ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, - intel_wopcm_guc_size(&dev_priv->wopcm)); + intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); - ret = intel_vgt_balloon(dev_priv); + ret = intel_vgt_balloon(ggtt); if (ret) return ret; @@ -2874,7 +2677,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) */ ret = ggtt_reserve_guc_top(ggtt); if (ret) - goto err_reserve; + goto err; /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { @@ -2887,35 +2690,41 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) /* And finally clear the reserved guard page */ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); - if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) { - ret = init_aliasing_ppgtt(dev_priv); + return 0; + +err: + cleanup_init_ggtt(ggtt); + return ret; +} + +int i915_init_ggtt(struct drm_i915_private *i915) +{ + int ret; + + ret = init_ggtt(&i915->ggtt); + if (ret) + return ret; + + if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { + ret = init_aliasing_ppgtt(&i915->ggtt); if (ret) - goto err_appgtt; + cleanup_init_ggtt(&i915->ggtt); } return 0; - -err_appgtt: - ggtt_release_guc_top(ggtt); -err_reserve: - drm_mm_remove_node(&ggtt->error_capture); - return ret; } -/** - * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization - * @dev_priv: i915 device - */ -void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) +static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *i915 = ggtt->vm.i915; struct i915_vma *vma, *vn; - struct pagevec *pvec; ggtt->vm.closed = true; - mutex_lock(&dev_priv->drm.struct_mutex); - fini_aliasing_ppgtt(dev_priv); + rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ + flush_workqueue(i915->wq); + + mutex_lock(&i915->drm.struct_mutex); list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) WARN_ON(i915_vma_unbind(vma)); @@ -2926,24 +2735,37 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) ggtt_release_guc_top(ggtt); if (drm_mm_initialized(&ggtt->vm.mm)) { - intel_vgt_deballoon(dev_priv); + intel_vgt_deballoon(ggtt); i915_address_space_fini(&ggtt->vm); } ggtt->vm.cleanup(&ggtt->vm); - pvec = &dev_priv->mm.wc_stash.pvec; + mutex_unlock(&i915->drm.struct_mutex); + + arch_phys_wc_del(ggtt->mtrr); + io_mapping_fini(&ggtt->iomap); +} + +/** + * i915_ggtt_driver_release - Clean up GGTT hardware initialization + * @i915: i915 device + */ +void i915_ggtt_driver_release(struct drm_i915_private 
*i915) +{ + struct pagevec *pvec; + + fini_aliasing_ppgtt(&i915->ggtt); + + ggtt_cleanup_hw(&i915->ggtt); + + pvec = &i915->mm.wc_stash.pvec; if (pvec->nr) { set_pages_array_wb(pvec->pages, pvec->nr); __pagevec_release(pvec); } - mutex_unlock(&dev_priv->drm.struct_mutex); - - arch_phys_wc_del(ggtt->mtrr); - io_mapping_fini(&ggtt->iomap); - - i915_gem_cleanup_stolen(dev_priv); + i915_gem_cleanup_stolen(i915); } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -3014,243 +2836,48 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return ret; } - ggtt->vm.scratch_pte = - ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr, + ggtt->vm.scratch[0].encode = + ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]), I915_CACHE_NONE, 0); return 0; } -static struct intel_ppat_entry * -__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value) +static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv) { - struct intel_ppat_entry *entry = &ppat->entries[index]; - - GEM_BUG_ON(index >= ppat->max_entries); - GEM_BUG_ON(test_bit(index, ppat->used)); - - entry->ppat = ppat; - entry->value = value; - kref_init(&entry->ref); - set_bit(index, ppat->used); - set_bit(index, ppat->dirty); - - return entry; + I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC); + I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); + I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC); + I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } -static void __free_ppat_entry(struct intel_ppat_entry *entry) -{ - struct intel_ppat *ppat = entry->ppat; - unsigned int index = entry - ppat->entries; - - GEM_BUG_ON(index >= ppat->max_entries); - GEM_BUG_ON(!test_bit(index, ppat->used)); - - entry->value = ppat->clear_value; - clear_bit(index, ppat->used); - set_bit(index, ppat->dirty); -} - -/** - * intel_ppat_get - get a usable PPAT entry - * @i915: i915 device instance - * @value: the PPAT value required by the caller - * - * The function tries to search if there is an existing PPAT entry which - * matches with the required value. If perfectly matched, the existing PPAT - * entry will be used. If only partially matched, it will try to check if - * there is any available PPAT index. If yes, it will allocate a new PPAT - * index for the required entry and update the HW. If not, the partially - * matched entry will be used. 
- */ -const struct intel_ppat_entry * -intel_ppat_get(struct drm_i915_private *i915, u8 value) -{ - struct intel_ppat *ppat = &i915->ppat; - struct intel_ppat_entry *entry = NULL; - unsigned int scanned, best_score; - int i; - - GEM_BUG_ON(!ppat->max_entries); - - scanned = best_score = 0; - for_each_set_bit(i, ppat->used, ppat->max_entries) { - unsigned int score; - - score = ppat->match(ppat->entries[i].value, value); - if (score > best_score) { - entry = &ppat->entries[i]; - if (score == INTEL_PPAT_PERFECT_MATCH) { - kref_get(&entry->ref); - return entry; - } - best_score = score; - } - scanned++; - } - - if (scanned == ppat->max_entries) { - if (!entry) - return ERR_PTR(-ENOSPC); - - kref_get(&entry->ref); - return entry; - } - - i = find_first_zero_bit(ppat->used, ppat->max_entries); - entry = __alloc_ppat_entry(ppat, i, value); - ppat->update_hw(i915); - return entry; -} - -static void release_ppat(struct kref *kref) -{ - struct intel_ppat_entry *entry = - container_of(kref, struct intel_ppat_entry, ref); - struct drm_i915_private *i915 = entry->ppat->i915; - - __free_ppat_entry(entry); - entry->ppat->update_hw(i915); -} - -/** - * intel_ppat_put - put back the PPAT entry got from intel_ppat_get() - * @entry: an intel PPAT entry - * - * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the - * entry is dynamically allocated, its reference count will be decreased. Once - * the reference count becomes into zero, the PPAT index becomes free again. - */ -void intel_ppat_put(const struct intel_ppat_entry *entry) -{ - struct intel_ppat *ppat = entry->ppat; - unsigned int index = entry - ppat->entries; - - GEM_BUG_ON(!ppat->max_entries); - - kref_put(&ppat->entries[index].ref, release_ppat); -} - -static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv) -{ - struct intel_ppat *ppat = &dev_priv->ppat; - int i; - - for_each_set_bit(i, ppat->dirty, ppat->max_entries) { - I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value); - clear_bit(i, ppat->dirty); - } -} - -static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv) +/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability + * bits. When using advanced contexts each context stores its own PAT, but + * writing this data shouldn't be harmful even in those cases. */ +static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) { - struct intel_ppat *ppat = &dev_priv->ppat; - u64 pat = 0; - int i; - - for (i = 0; i < ppat->max_entries; i++) - pat |= GEN8_PPAT(i, ppat->entries[i].value); + u64 pat; - bitmap_clear(ppat->dirty, 0, ppat->max_entries); + pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ + GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? 
*/ + GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ + GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ + GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | + GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | + GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | + GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } -static unsigned int bdw_private_pat_match(u8 src, u8 dst) +static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) { - unsigned int score = 0; - enum { - AGE_MATCH = BIT(0), - TC_MATCH = BIT(1), - CA_MATCH = BIT(2), - }; - - /* Cache attribute has to be matched. */ - if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst)) - return 0; - - score |= CA_MATCH; - - if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst)) - score |= TC_MATCH; - - if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst)) - score |= AGE_MATCH; - - if (score == (AGE_MATCH | TC_MATCH | CA_MATCH)) - return INTEL_PPAT_PERFECT_MATCH; - - return score; -} - -static unsigned int chv_private_pat_match(u8 src, u8 dst) -{ - return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ? - INTEL_PPAT_PERFECT_MATCH : 0; -} - -static void cnl_setup_private_ppat(struct intel_ppat *ppat) -{ - ppat->max_entries = 8; - ppat->update_hw = cnl_private_pat_update_hw; - ppat->match = bdw_private_pat_match; - ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); - - __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); - __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); - __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); - __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); - __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); -} - -/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability - * bits. When using advanced contexts each context stores its own PAT, but - * writing this data shouldn't be harmful even in those cases. */ -static void bdw_setup_private_ppat(struct intel_ppat *ppat) -{ - ppat->max_entries = 8; - ppat->update_hw = bdw_private_pat_update_hw; - ppat->match = bdw_private_pat_match; - ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); - - if (!HAS_PPGTT(ppat->i915)) { - /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, - * so RTL will always use the value corresponding to - * pat_sel = 000". - * So let's disable cache for GGTT to avoid screen corruptions. - * MOCS still can be used though. - * - System agent ggtt writes (i.e. cpu gtt mmaps) already work - * before this patch, i.e. the same uncached + snooping access - * like on gen6/7 seems to be in effect. - * - So this just fixes blitter/render access. Again it looks - * like it's not just uncached access, but uncached + snooping. - * So we can still hold onto all our assumptions wrt cpu - * clflushing on LLC machines. 
- */ - __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); - return; - } - - __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */ - __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */ - __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */ - __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */ - __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); -} - -static void chv_setup_private_ppat(struct intel_ppat *ppat) -{ - ppat->max_entries = 8; - ppat->update_hw = bdw_private_pat_update_hw; - ppat->match = chv_private_pat_match; - ppat->clear_value = CHV_PPAT_SNOOP; + u64 pat; /* * Map WB on BDW to snooped on CHV. @@ -3271,14 +2898,17 @@ static void chv_setup_private_ppat(struct intel_ppat *ppat) * in order to keep the global status page working. */ - __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 1, 0); - __alloc_ppat_entry(ppat, 2, 0); - __alloc_ppat_entry(ppat, 3, 0); - __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP); + pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | + GEN8_PPAT(1, 0) | + GEN8_PPAT(2, 0) | + GEN8_PPAT(3, 0) | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | + GEN8_PPAT(7, CHV_PPAT_SNOOP); + + I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } static void gen6_gmch_remove(struct i915_address_space *vm) @@ -3291,27 +2921,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm) static void setup_private_pat(struct drm_i915_private *dev_priv) { - struct intel_ppat *ppat = &dev_priv->ppat; - int i; - - ppat->i915 = dev_priv; + GEM_BUG_ON(INTEL_GEN(dev_priv) < 8); if (INTEL_GEN(dev_priv) >= 10) - cnl_setup_private_ppat(ppat); + cnl_setup_private_ppat(dev_priv); else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) - chv_setup_private_ppat(ppat); + chv_setup_private_ppat(dev_priv); else - bdw_setup_private_ppat(ppat); - - GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES); - - for_each_clear_bit(i, ppat->used, ppat->max_entries) { - ppat->entries[i].value = ppat->clear_value; - ppat->entries[i].ppat = ppat; - set_bit(i, ppat->dirty); - } - - ppat->update_hw(dev_priv); + bdw_setup_private_ppat(dev_priv); } static int gen8_gmch_probe(struct i915_ggtt *ggtt) @@ -3356,11 +2973,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; if (ggtt->vm.clear_range != nop_clear_range) ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; - - /* Prevent recursively calling stop_machine() and deadlocks. 
*/ - dev_info(dev_priv->drm.dev, - "Disabling error capture for VT-d workaround\n"); - i915_disable_error_state(dev_priv, -ENODEV); } ggtt->invalidate = gen6_ggtt_invalidate; @@ -3478,21 +3090,18 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) return 0; } -/** - * i915_ggtt_probe_hw - Probe GGTT hardware location - * @dev_priv: i915 device - */ -int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) +static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *i915 = gt->i915; int ret; - ggtt->vm.i915 = dev_priv; - ggtt->vm.dma = &dev_priv->drm.pdev->dev; + ggtt->vm.gt = gt; + ggtt->vm.i915 = i915; + ggtt->vm.dma = &i915->drm.pdev->dev; - if (INTEL_GEN(dev_priv) <= 5) + if (INTEL_GEN(i915) <= 5) ret = i915_gmch_probe(ggtt); - else if (INTEL_GEN(dev_priv) < 8) + else if (INTEL_GEN(i915) < 8) ret = gen6_gmch_probe(ggtt); else ret = gen8_gmch_probe(ggtt); @@ -3520,51 +3129,82 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); DRM_DEBUG_DRIVER("DSM size = %lluM\n", (u64)resource_size(&intel_graphics_stolen_res) >> 20); - if (intel_vtd_active()) - DRM_INFO("VT-d active for gfx access\n"); return 0; } /** - * i915_ggtt_init_hw - Initialize GGTT hardware - * @dev_priv: i915 device + * i915_ggtt_probe_hw - Probe GGTT hardware location + * @i915: i915 device */ -int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) +int i915_ggtt_probe_hw(struct drm_i915_private *i915) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - stash_init(&dev_priv->mm.wc_stash); + ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); + if (ret) + return ret; + + if (intel_vtd_active()) + DRM_INFO("VT-d active for gfx access\n"); + + return 0; +} + +static int ggtt_init_hw(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + int ret = 0; + + mutex_lock(&i915->drm.struct_mutex); - /* Note that we use page colouring to enforce a guard page at the - * end of the address space. This is required as the CS may prefetch - * beyond the end of the batch buffer, across the page boundary, - * and beyond the end of the GTT if we do not provide a guard. - */ - mutex_lock(&dev_priv->drm.struct_mutex); i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); ggtt->vm.is_ggtt = true; /* Only VLV supports read-only GGTT mappings */ - ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); + ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); - if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv)) + if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; - mutex_unlock(&dev_priv->drm.struct_mutex); - if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, - dev_priv->ggtt.gmadr.start, - dev_priv->ggtt.mappable_end)) { + if (!io_mapping_init_wc(&ggtt->iomap, + ggtt->gmadr.start, + ggtt->mappable_end)) { + ggtt->vm.cleanup(&ggtt->vm); ret = -EIO; - goto out_gtt_cleanup; + goto out; } ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); i915_ggtt_init_fences(ggtt); +out: + mutex_unlock(&i915->drm.struct_mutex); + + return ret; +} + +/** + * i915_ggtt_init_hw - Initialize GGTT hardware + * @dev_priv: i915 device + */ +int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) +{ + int ret; + + stash_init(&dev_priv->mm.wc_stash); + + /* Note that we use page colouring to enforce a guard page at the + * end of the address space. 
This is required as the CS may prefetch + * beyond the end of the batch buffer, across the page boundary, + * and beyond the end of the GTT if we do not provide a guard. + */ + ret = ggtt_init_hw(&dev_priv->ggtt); + if (ret) + return ret; + /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. @@ -3576,7 +3216,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) return 0; out_gtt_cleanup: - ggtt->vm.cleanup(&ggtt->vm); + dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm); return ret; } @@ -3588,35 +3228,34 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) return 0; } -void i915_ggtt_enable_guc(struct drm_i915_private *i915) +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) { - GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate); + GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate); - i915->ggtt.invalidate = guc_ggtt_invalidate; + ggtt->invalidate = guc_ggtt_invalidate; - i915_ggtt_invalidate(i915); + ggtt->invalidate(ggtt); } -void i915_ggtt_disable_guc(struct drm_i915_private *i915) +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) { /* XXX Temporary pardon for error unload */ - if (i915->ggtt.invalidate == gen6_ggtt_invalidate) + if (ggtt->invalidate == gen6_ggtt_invalidate) return; /* We should only be called after i915_ggtt_enable_guc() */ - GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); + GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); - i915->ggtt.invalidate = gen6_ggtt_invalidate; + ggtt->invalidate = gen6_ggtt_invalidate; - i915_ggtt_invalidate(i915); + ggtt->invalidate(ggtt); } -void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) +static void ggtt_restore_mappings(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma, *vn; - i915_check_and_clear_faults(dev_priv); + intel_gt_check_and_clear_faults(ggtt->vm.gt); mutex_lock(&ggtt->vm.mutex); @@ -3650,17 +3289,17 @@ lock: } ggtt->vm.closed = false; - i915_ggtt_invalidate(dev_priv); + ggtt->invalidate(ggtt); mutex_unlock(&ggtt->vm.mutex); +} - if (INTEL_GEN(dev_priv) >= 8) { - struct intel_ppat *ppat = &dev_priv->ppat; +void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) +{ + ggtt_restore_mappings(&i915->ggtt); - bitmap_set(ppat->dirty, 0, ppat->max_entries); - dev_priv->ppat.update_hw(dev_priv); - return; - } + if (INTEL_GEN(i915) >= 8) + setup_private_pat(i915); } static struct scatterlist * @@ -3949,7 +3588,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm, GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT)); GEM_BUG_ON(range_overflows(offset, size, vm->total)); - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); + GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); GEM_BUG_ON(drm_mm_node_allocated(node)); node->size = size; @@ -4046,7 +3685,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, GEM_BUG_ON(start >= end); GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); + GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); GEM_BUG_ON(drm_mm_node_allocated(node)); if (unlikely(range_overflows(start, size, end))) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 812717ccc69b..51274483502e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -35,15 +35,19 @@ #define __I915_GEM_GTT_H__ #include 
<linux/io-mapping.h> +#include <linux/kref.h> #include <linux/mm.h> #include <linux/pagevec.h> +#include <linux/workqueue.h> + +#include <drm/drm_mm.h> #include "gt/intel_reset.h" #include "i915_gem_fence_reg.h" #include "i915_request.h" #include "i915_scatterlist.h" #include "i915_selftest.h" -#include "i915_timeline.h" +#include "gt/intel_timeline.h" #define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) #define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) @@ -64,12 +68,10 @@ struct drm_i915_file_private; struct drm_i915_gem_object; struct i915_vma; +struct intel_gt; typedef u32 gen6_pte_t; typedef u64 gen8_pte_t; -typedef u64 gen8_pde_t; -typedef u64 gen8_ppgtt_pdpe_t; -typedef u64 gen8_ppgtt_pml4e_t; #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT) @@ -113,30 +115,18 @@ typedef u64 gen8_ppgtt_pml4e_t; #define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) #define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) -/* GEN8 32b style address is defined as a 3 level page table: +/* + * GEN8 32b style address is defined as a 3 level page table: * 31:30 | 29:21 | 20:12 | 11:0 * PDPE | PDE | PTE | offset * The difference as compared to normal x86 3 level page table is the PDPEs are * programmed via register. - */ -#define GEN8_3LVL_PDPES 4 -#define GEN8_PDE_SHIFT 21 -#define GEN8_PDE_MASK 0x1ff -#define GEN8_PTE_SHIFT 12 -#define GEN8_PTE_MASK 0x1ff -#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) - -/* GEN8 48b style address is defined as a 4 level page table: + * + * GEN8 48b style address is defined as a 4 level page table: * 47:39 | 38:30 | 29:21 | 20:12 | 11:0 * PML4E | PDPE | PDE | PTE | offset */ -#define GEN8_PML4ES_PER_PML4 512 -#define GEN8_PML4E_SHIFT 39 -#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1) -#define GEN8_PDPE_SHIFT 30 -/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page - * tables */ -#define GEN8_PDPE_MASK 0x1ff +#define GEN8_3LVL_PDPES 4 #define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD) #define PPAT_CACHED_PDE 0 /* WB LLC */ @@ -155,11 +145,6 @@ typedef u64 gen8_ppgtt_pml4e_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) -#define GEN8_PPAT_GET_CA(x) ((x) & 3) -#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2)) -#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4)) -#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6)) - #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) @@ -243,8 +228,10 @@ struct i915_page_dma { }; }; -#define px_base(px) (&(px)->base) -#define px_dma(px) (px_base(px)->daddr) +struct i915_page_scratch { + struct i915_page_dma base; + u64 encode; +}; struct i915_page_table { struct i915_page_dma base; @@ -252,12 +239,32 @@ struct i915_page_table { }; struct i915_page_directory { - struct i915_page_dma base; - atomic_t used; + struct i915_page_table pt; spinlock_t lock; void *entry[512]; }; +#define __px_choose_expr(x, type, expr, other) \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), type) || \ + __builtin_types_compatible_p(typeof(x), const type), \ + ({ type __x = (type)(x); expr; }), \ + other) + +#define px_base(px) \ + __px_choose_expr(px, struct i915_page_dma *, __x, \ + __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \ + __px_choose_expr(px, struct i915_page_table *, &__x->base, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ + (void)0)))) +#define px_dma(px) (px_base(px)->daddr) + +#define px_pt(px) \ + __px_choose_expr(px, struct i915_page_table *, __x, \ + __px_choose_expr(px, struct i915_page_directory *, 
&__x->pt, \ + (void)0)) +#define px_used(px) (&px_pt(px)->used) + struct i915_vma_ops { /* Map an object into an address space with the given cache flags. */ int (*bind_vma)(struct i915_vma *vma, @@ -280,8 +287,10 @@ struct pagestash { struct i915_address_space { struct kref ref; + struct rcu_work rcu; struct drm_mm mm; + struct intel_gt *gt; struct drm_i915_private *i915; struct device *dma; /* Every address space belongs to a struct file - except for the global @@ -302,12 +311,9 @@ struct i915_address_space { #define VM_CLASS_GGTT 0 #define VM_CLASS_PPGTT 1 - u64 scratch_pte; - int scratch_order; - struct i915_page_dma scratch_page; - struct i915_page_table *scratch_pt; - struct i915_page_directory *scratch_pd; - struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */ + struct i915_page_scratch scratch[4]; + unsigned int scratch_order; + unsigned int top; /** * List of vma currently bound. @@ -386,7 +392,10 @@ struct i915_ggtt { /** "Graphics Stolen Memory" holds the global PTEs */ void __iomem *gsm; - void (*invalidate)(struct drm_i915_private *dev_priv); + void (*invalidate)(struct i915_ggtt *ggtt); + + /** PPGTT used for aliasing the PPGTT with the GTT */ + struct i915_ppgtt *alias; bool do_idle_maps; @@ -425,8 +434,6 @@ struct gen6_ppgtt { unsigned int pin_count; bool scan_for_unused_pt; - - struct gen6_ppgtt_cleanup_work *work; }; #define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base) @@ -506,15 +513,6 @@ static inline u32 gen6_pde_index(u32 addr) return i915_pde_index(addr, GEN6_PDE_SHIFT); } -static inline unsigned int -i915_pdpes_per_pdp(const struct i915_address_space *vm) -{ - if (i915_vm_is_4lvl(vm)) - return GEN8_PML4ES_PER_PML4; - - return GEN8_3LVL_PDPES; -} - static inline struct i915_page_table * i915_pt_entry(const struct i915_page_directory * const pd, const unsigned short n) @@ -529,73 +527,12 @@ i915_pd_entry(const struct i915_page_directory * const pdp, return pdp->entry[n]; } -static inline struct i915_page_directory * -i915_pdp_entry(const struct i915_page_directory * const pml4, - const unsigned short n) -{ - return pml4->entry[n]; -} - -/* Equivalent to the gen6 version, For each pde iterates over every pde - * between from start until start + length. On gen8+ it simply iterates - * over every page directory entry in a page directory. 
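The px_base()/px_pt() accessors introduced above lean on __builtin_types_compatible_p() plus __builtin_choose_expr() so one macro resolves, at compile time, the path to the embedded i915_page_dma for whichever page-table structure it is handed, and fails the build for anything else. A small self-contained illustration of the same GNU C pattern, using hypothetical structs rather than the driver's types:

struct page_dma   { unsigned long daddr; };
struct page_table { struct page_dma base; };
struct page_dir   { struct page_table pt; };

/* resolve the embedded page_dma by the static type of the pointer */
#define to_dma(px)							\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(typeof(px), struct page_table *), \
		&((struct page_table *)(void *)(px))->base,		\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(typeof(px), struct page_dir *), \
		&((struct page_dir *)(void *)(px))->pt.base,		\
		(void)0))

static unsigned long table_daddr(struct page_table *pt)
{
	return to_dma(pt)->daddr;	/* first branch is chosen */
}

static unsigned long dir_daddr(struct page_dir *pd)
{
	return to_dma(pd)->daddr;	/* second branch is chosen */
}

Because the selection happens at compile time, the unused branches cost nothing, and a pointer type the macro does not know about lands on (void)0 and breaks the build instead of misbehaving at runtime.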
- */ -#define gen8_for_each_pde(pt, pd, start, length, iter) \ - for (iter = gen8_pde_index(start); \ - length > 0 && iter < I915_PDES && \ - (pt = i915_pt_entry(pd, iter), true); \ - ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \ - for (iter = gen8_pdpe_index(start); \ - length > 0 && iter < i915_pdpes_per_pdp(vm) && \ - (pd = i915_pd_entry(pdp, iter), true); \ - ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \ - for (iter = gen8_pml4e_index(start); \ - length > 0 && iter < GEN8_PML4ES_PER_PML4 && \ - (pdp = i915_pdp_entry(pml4, iter), true); \ - ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -static inline u32 gen8_pte_index(u64 address) -{ - return i915_pte_index(address, GEN8_PDE_SHIFT); -} - -static inline u32 gen8_pde_index(u64 address) -{ - return i915_pde_index(address, GEN8_PDE_SHIFT); -} - -static inline u32 gen8_pdpe_index(u64 address) -{ - return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK; -} - -static inline u32 gen8_pml4e_index(u64 address) -{ - return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK; -} - -static inline u64 gen8_pte_count(u64 address, u64 length) -{ - return i915_pte_count(address, length, GEN8_PDE_SHIFT); -} - static inline dma_addr_t i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) { - struct i915_page_directory *pd; + struct i915_page_dma *pt = ppgtt->pd->entry[n]; - pd = i915_pdp_entry(ppgtt->pd, n); - return px_dma(pd); + return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top])); } static inline struct i915_ggtt * @@ -614,46 +551,15 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) return container_of(vm, struct i915_ppgtt, vm); } -#define INTEL_MAX_PPAT_ENTRIES 8 -#define INTEL_PPAT_PERFECT_MATCH (~0U) - -struct intel_ppat; - -struct intel_ppat_entry { - struct intel_ppat *ppat; - struct kref ref; - u8 value; -}; - -struct intel_ppat { - struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES]; - DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES); - DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES); - unsigned int max_entries; - u8 clear_value; - /* - * Return a score to show how two PPAT values match, - * a INTEL_PPAT_PERFECT_MATCH indicates a perfect match - */ - unsigned int (*match)(u8 src, u8 dst); - void (*update_hw)(struct drm_i915_private *i915); - - struct drm_i915_private *i915; -}; - -const struct intel_ppat_entry * -intel_ppat_get(struct drm_i915_private *i915, u8 value); -void intel_ppat_put(const struct intel_ppat_entry *entry); - int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); -void i915_ggtt_enable_guc(struct drm_i915_private *i915); -void i915_ggtt_disable_guc(struct drm_i915_private *i915); -int i915_gem_init_ggtt(struct drm_i915_private *dev_priv); -void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); +int i915_init_ggtt(struct drm_i915_private *dev_priv); +void i915_ggtt_driver_release(struct drm_i915_private *dev_priv); -int i915_ppgtt_init_hw(struct drm_i915_private 
*dev_priv); +int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h deleted file mode 100644 index 112cda8fa1a8..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_render_state.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef _I915_GEM_RENDER_STATE_H_ -#define _I915_GEM_RENDER_STATE_H_ - -struct i915_request; - -int i915_gem_render_state_emit(struct i915_request *rq); - -#endif /* _I915_GEM_RENDER_STATE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h index 04c1ce107fc0..2d199f411a4a 100644 --- a/drivers/gpu/drm/i915/i915_globals.h +++ b/drivers/gpu/drm/i915/i915_globals.h @@ -7,6 +7,8 @@ #ifndef _I915_GLOBALS_H_ #define _I915_GLOBALS_H_ +#include <linux/types.h> + typedef void (*i915_global_func_t)(void); struct i915_global { diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b7e9fddef270..0c0f255000c2 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -29,8 +29,8 @@ #include <linux/ascii85.h> #include <linux/nmi.h> +#include <linux/pagevec.h> #include <linux/scatterlist.h> -#include <linux/stop_machine.h> #include <linux/utsname.h> #include <linux/zlib.h> @@ -46,6 +46,9 @@ #include "i915_scatterlist.h" #include "intel_csr.h" +#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) +#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) + static inline const struct intel_engine_cs * engine_lookup(const struct drm_i915_private *i915, unsigned int id) { @@ -67,26 +70,6 @@ engine_name(const struct drm_i915_private *i915, unsigned int id) return __engine_name(engine_lookup(i915, id)); } -static const char *tiling_flag(int tiling) -{ - switch (tiling) { - default: - case I915_TILING_NONE: return ""; - case I915_TILING_X: return " X"; - case I915_TILING_Y: return " Y"; - } -} - -static const char *dirty_flag(int dirty) -{ - return dirty ? " dirty" : ""; -} - -static const char *purgeable_flag(int purgeable) -{ - return purgeable ? 
" purgeable" : ""; -} - static void __sg_set_buf(struct scatterlist *sg, void *addr, unsigned int len, loff_t it) { @@ -114,7 +97,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) if (e->cur == e->end) { struct scatterlist *sgl; - sgl = (typeof(sgl))__get_free_page(GFP_KERNEL); + sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL); if (!sgl) { e->err = -ENOMEM; return false; @@ -134,7 +117,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) } e->size = ALIGN(len + 1, SZ_64K); - e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + e->buf = kmalloc(e->size, ALLOW_FAIL); if (!e->buf) { e->size = PAGE_ALIGN(len + 1); e->buf = kmalloc(e->size, GFP_KERNEL); @@ -211,47 +194,115 @@ i915_error_printer(struct drm_i915_error_state_buf *e) return p; } +/* single threaded page allocator with a reserved stash for emergencies */ +static void pool_fini(struct pagevec *pv) +{ + pagevec_release(pv); +} + +static int pool_refill(struct pagevec *pv, gfp_t gfp) +{ + while (pagevec_space(pv)) { + struct page *p; + + p = alloc_page(gfp); + if (!p) + return -ENOMEM; + + pagevec_add(pv, p); + } + + return 0; +} + +static int pool_init(struct pagevec *pv, gfp_t gfp) +{ + int err; + + pagevec_init(pv); + + err = pool_refill(pv, gfp); + if (err) + pool_fini(pv); + + return err; +} + +static void *pool_alloc(struct pagevec *pv, gfp_t gfp) +{ + struct page *p; + + p = alloc_page(gfp); + if (!p && pagevec_count(pv)) + p = pv->pages[--pv->nr]; + + return p ? page_address(p) : NULL; +} + +static void pool_free(struct pagevec *pv, void *addr) +{ + struct page *p = virt_to_page(addr); + + if (pagevec_space(pv)) + pagevec_add(pv, p); + else + __free_page(p); +} + #ifdef CONFIG_DRM_I915_COMPRESS_ERROR struct compress { + struct pagevec pool; struct z_stream_s zstream; void *tmp; }; static bool compress_init(struct compress *c) { - struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream)); + struct z_stream_s *zstream = &c->zstream; - zstream->workspace = - kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), - GFP_ATOMIC | __GFP_NOWARN); - if (!zstream->workspace) + if (pool_init(&c->pool, ALLOW_FAIL)) return false; - if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) { - kfree(zstream->workspace); + zstream->workspace = + kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), + ALLOW_FAIL); + if (!zstream->workspace) { + pool_fini(&c->pool); return false; } c->tmp = NULL; if (i915_has_memcpy_from_wc()) - c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN); + c->tmp = pool_alloc(&c->pool, ALLOW_FAIL); return true; } -static void *compress_next_page(struct drm_i915_error_object *dst) +static bool compress_start(struct compress *c) +{ + struct z_stream_s *zstream = &c->zstream; + void *workspace = zstream->workspace; + + memset(zstream, 0, sizeof(*zstream)); + zstream->workspace = workspace; + + return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK; +} + +static void *compress_next_page(struct compress *c, + struct drm_i915_error_object *dst) { - unsigned long page; + void *page; if (dst->page_count >= dst->num_pages) return ERR_PTR(-ENOSPC); - page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); + page = pool_alloc(&c->pool, ALLOW_FAIL); if (!page) return ERR_PTR(-ENOMEM); - return dst->pages[dst->page_count++] = (void *)page; + return dst->pages[dst->page_count++] = page; } static int compress_page(struct compress *c, @@ -267,7 +318,7 @@ static int compress_page(struct compress *c, do { if 
(zstream->avail_out == 0) { - zstream->next_out = compress_next_page(dst); + zstream->next_out = compress_next_page(c, dst); if (IS_ERR(zstream->next_out)) return PTR_ERR(zstream->next_out); @@ -276,8 +327,6 @@ static int compress_page(struct compress *c, if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) return -EIO; - - touch_nmi_watchdog(); } while (zstream->avail_in); /* Fallback to uncompressed if we increase size? */ @@ -295,7 +344,7 @@ static int compress_flush(struct compress *c, do { switch (zlib_deflate(zstream, Z_FINISH)) { case Z_OK: /* more space requested */ - zstream->next_out = compress_next_page(dst); + zstream->next_out = compress_next_page(c, dst); if (IS_ERR(zstream->next_out)) return PTR_ERR(zstream->next_out); @@ -316,15 +365,17 @@ end: return 0; } -static void compress_fini(struct compress *c, - struct drm_i915_error_object *dst) +static void compress_finish(struct compress *c) { - struct z_stream_s *zstream = &c->zstream; + zlib_deflateEnd(&c->zstream); +} - zlib_deflateEnd(zstream); - kfree(zstream->workspace); +static void compress_fini(struct compress *c) +{ + kfree(c->zstream.workspace); if (c->tmp) - free_page((unsigned long)c->tmp); + pool_free(&c->pool, c->tmp); + pool_fini(&c->pool); } static void err_compression_marker(struct drm_i915_error_state_buf *m) @@ -335,10 +386,16 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #else struct compress { + struct pagevec pool; }; static bool compress_init(struct compress *c) { + return pool_init(&c->pool, ALLOW_FAIL) == 0; +} + +static bool compress_start(struct compress *c) +{ return true; } @@ -346,14 +403,12 @@ static int compress_page(struct compress *c, void *src, struct drm_i915_error_object *dst) { - unsigned long page; void *ptr; - page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); - if (!page) + ptr = pool_alloc(&c->pool, ALLOW_FAIL); + if (!ptr) return -ENOMEM; - ptr = (void *)page; if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE)) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; @@ -367,11 +422,15 @@ static int compress_flush(struct compress *c, return 0; } -static void compress_fini(struct compress *c, - struct drm_i915_error_object *dst) +static void compress_finish(struct compress *c) { } +static void compress_fini(struct compress *c) +{ + pool_fini(&c->pool); +} + static void err_compression_marker(struct drm_i915_error_state_buf *m) { err_puts(m, "~"); @@ -379,36 +438,6 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #endif -static void print_error_buffers(struct drm_i915_error_state_buf *m, - const char *name, - struct drm_i915_error_buffer *err, - int count) -{ - err_printf(m, "%s [%d]:\n", name, count); - - while (count--) { - err_printf(m, " %08x_%08x %8u %02x %02x", - upper_32_bits(err->gtt_offset), - lower_32_bits(err->gtt_offset), - err->size, - err->read_domains, - err->write_domain); - err_puts(m, tiling_flag(err->tiling)); - err_puts(m, dirty_flag(err->dirty)); - err_puts(m, purgeable_flag(err->purgeable)); - err_puts(m, err->userptr ? 
" userptr" : ""); - err_puts(m, i915_cache_level_str(m->i915, err->cache_level)); - - if (err->name) - err_printf(m, " (name: %d)", err->name); - if (err->fence_reg != I915_FENCE_REG_NONE) - err_printf(m, " (fence: %d)", err->fence_reg); - - err_puts(m, "\n"); - err++; - } -} - static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { @@ -620,7 +649,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m, const struct i915_gpu_state *error = container_of(error_uc, typeof(*error), uc); - if (!error->device_info.has_guc) + if (!error->device_info.has_gt_uc) return; intel_uc_fw_dump(&error_uc->guc_fw, &p); @@ -734,33 +763,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, error_print_engine(m, &error->engine[i], error->epoch); } - for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) { - char buf[128]; - int len, first = 1; - - if (!error->active_vm[i]) - break; - - len = scnprintf(buf, sizeof(buf), "Active ("); - for (j = 0; j < ARRAY_SIZE(error->engine); j++) { - if (error->engine[j].vm != error->active_vm[i]) - continue; - - len += scnprintf(buf + len, sizeof(buf), "%s%s", - first ? "" : ", ", - m->i915->engine[j]->name); - first = 0; - } - scnprintf(buf + len, sizeof(buf), ")"); - print_error_buffers(m, buf, - error->active_bo[i], - error->active_bo_count[i]); - } - - print_error_buffers(m, "Pinned (global)", - error->pinned_bo, - error->pinned_bo_count); - for (i = 0; i < ARRAY_SIZE(error->engine); i++) { const struct drm_i915_error_engine *ee = &error->engine[i]; @@ -974,10 +976,6 @@ void __i915_gpu_state_free(struct kref *error_ref) kfree(ee->requests); } - for (i = 0; i < ARRAY_SIZE(error->active_bo); i++) - kfree(error->active_bo[i]); - kfree(error->pinned_bo); - kfree(error->overlay); kfree(error->display); @@ -990,108 +988,63 @@ void __i915_gpu_state_free(struct kref *error_ref) static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *i915, - struct i915_vma *vma) + struct i915_vma *vma, + struct compress *compress) { struct i915_ggtt *ggtt = &i915->ggtt; const u64 slot = ggtt->error_capture.start; struct drm_i915_error_object *dst; - struct compress compress; unsigned long num_pages; struct sgt_iter iter; dma_addr_t dma; int ret; + might_sleep(); + if (!vma || !vma->pages) return NULL; num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), - GFP_ATOMIC | __GFP_NOWARN); + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL); if (!dst) return NULL; + if (!compress_start(compress)) { + kfree(dst); + return NULL; + } + dst->gtt_offset = vma->node.start; dst->gtt_size = vma->node.size; dst->num_pages = num_pages; dst->page_count = 0; dst->unused = 0; - if (!compress_init(&compress)) { - kfree(dst); - return NULL; - } - ret = -EINVAL; for_each_sgt_dma(dma, iter, vma->pages) { void __iomem *s; ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); - s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); - ret = compress_page(&compress, (void __force *)s, dst); - io_mapping_unmap_atomic(s); + s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); + ret = compress_page(compress, (void __force *)s, dst); + io_mapping_unmap(s); if (ret) break; } - if (ret || compress_flush(&compress, dst)) { + if (ret || compress_flush(compress, dst)) { while (dst->page_count--) - free_page((unsigned long)dst->pages[dst->page_count]); + 
pool_free(&compress->pool, dst->pages[dst->page_count]); kfree(dst); dst = NULL; } + compress_finish(compress); - compress_fini(&compress, dst); return dst; } -static void capture_bo(struct drm_i915_error_buffer *err, - struct i915_vma *vma) -{ - struct drm_i915_gem_object *obj = vma->obj; - - err->size = obj->base.size; - err->name = obj->base.name; - - err->gtt_offset = vma->node.start; - err->read_domains = obj->read_domains; - err->write_domain = obj->write_domain; - err->fence_reg = vma->fence ? vma->fence->id : -1; - err->tiling = i915_gem_object_get_tiling(obj); - err->dirty = obj->mm.dirty; - err->purgeable = obj->mm.madv != I915_MADV_WILLNEED; - err->userptr = obj->userptr.mm != NULL; - err->cache_level = obj->cache_level; -} - -static u32 capture_error_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head, - unsigned int flags) -#define ACTIVE_ONLY BIT(0) -#define PINNED_ONLY BIT(1) -{ - struct i915_vma *vma; - int i = 0; - - list_for_each_entry(vma, head, vm_link) { - if (!vma->obj) - continue; - - if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma)) - continue; - - if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma)) - continue; - - capture_bo(err++, vma); - if (++i == count) - break; - } - - return i; -} - /* * Generate a semi-unique error code. The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by @@ -1194,6 +1147,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, switch (engine->id) { default: MISSING_CASE(engine->id); + /* fall through */ case RCS0: mmio = RENDER_HWS_PGA_GEN7; break; @@ -1248,10 +1202,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error, } } -static void record_request(struct i915_request *request, +static void record_request(const struct i915_request *request, struct drm_i915_error_request *erq) { - struct i915_gem_context *ctx = request->gem_context; + const struct i915_gem_context *ctx = request->gem_context; erq->flags = request->fence.flags; erq->context = request->fence.context; @@ -1281,7 +1235,7 @@ static void engine_record_requests(struct intel_engine_cs *engine, if (!count) return; - ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC); + ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL); if (!ee->requests) return; @@ -1315,20 +1269,15 @@ static void engine_record_requests(struct intel_engine_cs *engine, ee->num_requests = count; } -static void error_record_engine_execlists(struct intel_engine_cs *engine, +static void error_record_engine_execlists(const struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) { const struct intel_engine_execlists * const execlists = &engine->execlists; - unsigned int n; - - for (n = 0; n < execlists_num_ports(execlists); n++) { - struct i915_request *rq = port_request(&execlists->port[n]); + struct i915_request * const *port = execlists->active; + unsigned int n = 0; - if (!rq) - break; - - record_request(rq, &ee->execlist[n]); - } + while (*port) + record_request(*port++, &ee->execlist[n++]); ee->num_ports = n; } @@ -1354,8 +1303,42 @@ static void record_context(struct drm_i915_error_context *e, e->active = atomic_read(&ctx->active_count); } -static void request_record_user_bo(struct i915_request *request, - struct drm_i915_error_engine *ee) +struct capture_vma { + struct capture_vma *next; + void **slot; +}; + +static struct capture_vma * +capture_vma(struct capture_vma *next, + struct i915_vma *vma, + struct drm_i915_error_object **out) +{ 
+ struct capture_vma *c; + + *out = NULL; + if (!vma) + return next; + + c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL); + if (!c) + return next; + + if (!i915_active_trygrab(&vma->active)) { + kfree(c); + return next; + } + + c->slot = (void **)out; + *c->slot = i915_vma_get(vma); + + c->next = next; + return c; +} + +static struct capture_vma * +request_record_user_bo(struct i915_request *request, + struct drm_i915_error_engine *ee, + struct capture_vma *capture) { struct i915_capture_list *c; struct drm_i915_error_object **bo; @@ -1365,33 +1348,34 @@ static void request_record_user_bo(struct i915_request *request, for (c = request->capture_list; c; c = c->next) max++; if (!max) - return; + return capture; - bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC); + bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); if (!bo) { /* If we can't capture everything, try to capture something. */ max = min_t(long, max, PAGE_SIZE / sizeof(*bo)); - bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC); + bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); } if (!bo) - return; + return capture; count = 0; for (c = request->capture_list; c; c = c->next) { - bo[count] = i915_error_object_create(request->i915, c->vma); - if (!bo[count]) - break; + capture = capture_vma(capture, c->vma, &bo[count]); if (++count == max) break; } ee->user_bo = bo; ee->user_bo_count = count; + + return capture; } static struct drm_i915_error_object * capture_object(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + struct compress *compress) { if (obj && i915_gem_object_has_pages(obj)) { struct i915_vma fake = { @@ -1401,22 +1385,24 @@ capture_object(struct drm_i915_private *dev_priv, .obj = obj, }; - return i915_error_object_create(dev_priv, &fake); + return i915_error_object_create(dev_priv, &fake, compress); } else { return NULL; } } -static void gem_record_rings(struct i915_gpu_state *error) +static void +gem_record_rings(struct i915_gpu_state *error, struct compress *compress) { struct drm_i915_private *i915 = error->i915; - struct i915_ggtt *ggtt = &i915->ggtt; int i; for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = i915->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; + struct capture_vma *capture = NULL; struct i915_request *request; + unsigned long flags; ee->engine_id = -1; @@ -1425,34 +1411,43 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->engine_id = i; + /* Refill our page pool before entering atomic section */ + pool_refill(&compress->pool, ALLOW_FAIL); + error_record_engine_registers(error, engine, ee); error_record_engine_execlists(engine, ee); + spin_lock_irqsave(&engine->active.lock, flags); request = intel_engine_find_active_request(engine); if (request) { struct i915_gem_context *ctx = request->gem_context; - struct intel_ring *ring; - - ee->vm = ctx->vm ?: &ggtt->vm; + struct intel_ring *ring = request->ring; record_context(&ee->context, ctx); - /* We need to copy these to an anonymous buffer + /* + * We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten * by userspace. 
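The capture_vma() machinery being added here splits error capture into two phases: while the engine's active lock is held, only a reference to the vma and a destination slot for the eventual copy are pushed onto a singly-linked list; the page-by-page copy, which after this change may sleep, runs when the list is drained once the lock is dropped. The general shape of that pattern, reduced to plain C with hypothetical names:

#include <stdlib.h>

struct deferred {
	struct deferred *next;
	void **slot;	/* where the finished copy will be stored */
	void *object;	/* reference taken while the lock was held */
};

/* phase 1: cheap enough to run with the lock held / in atomic context */
static struct deferred *defer(struct deferred *head, void *object, void **slot)
{
	struct deferred *d = malloc(sizeof(*d));

	*slot = NULL;
	if (!d)
		return head;	/* best effort: skip this object on OOM */

	d->object = object;
	d->slot = slot;
	d->next = head;
	return d;
}

/* phase 2: after the lock is dropped; the copy itself may sleep */
static void drain(struct deferred *head, void *(*copy)(void *object))
{
	while (head) {
		struct deferred *d = head;

		*d->slot = copy(d->object);
		head = d->next;
		free(d);
	}
}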
*/ - ee->batchbuffer = - i915_error_object_create(i915, request->batch); + capture = capture_vma(capture, + request->batch, + &ee->batchbuffer); if (HAS_BROKEN_CS_TLB(i915)) - ee->wa_batchbuffer = - i915_error_object_create(i915, - i915->gt.scratch); - request_record_user_bo(request, ee); + capture = capture_vma(capture, + engine->gt->scratch, + &ee->wa_batchbuffer); - ee->ctx = - i915_error_object_create(i915, - request->hw_context->state); + capture = request_record_user_bo(request, ee, capture); + + capture = capture_vma(capture, + request->hw_context->state, + &ee->ctx); + + capture = capture_vma(capture, + ring->vma, + &ee->ringbuffer); error->simulated |= i915_gem_context_no_error_capture(ctx); @@ -1461,118 +1456,65 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->rq_post = request->postfix; ee->rq_tail = request->tail; - ring = request->ring; ee->cpu_ring_head = ring->head; ee->cpu_ring_tail = ring->tail; - ee->ringbuffer = - i915_error_object_create(i915, ring->vma); engine_record_requests(engine, request, ee); } + spin_unlock_irqrestore(&engine->active.lock, flags); - ee->hws_page = - i915_error_object_create(i915, - engine->status_page.vma); - - ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma); + while (capture) { + struct capture_vma *this = capture; + struct i915_vma *vma = *this->slot; - ee->default_state = capture_object(i915, engine->default_state); - } -} + *this->slot = + i915_error_object_create(i915, vma, compress); -static void gem_capture_vm(struct i915_gpu_state *error, - struct i915_address_space *vm, - int idx) -{ - struct drm_i915_error_buffer *active_bo; - struct i915_vma *vma; - int count; + i915_active_ungrab(&vma->active); + i915_vma_put(vma); - count = 0; - list_for_each_entry(vma, &vm->bound_list, vm_link) - if (i915_vma_is_active(vma)) - count++; - - active_bo = NULL; - if (count) - active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC); - if (active_bo) - count = capture_error_bo(active_bo, - count, &vm->bound_list, - ACTIVE_ONLY); - else - count = 0; - - error->active_vm[idx] = vm; - error->active_bo[idx] = active_bo; - error->active_bo_count[idx] = count; -} - -static void capture_active_buffers(struct i915_gpu_state *error) -{ - int cnt = 0, i, j; - - BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo)); - BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm)); - BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count)); + capture = this->next; + kfree(this); + } - /* Scan each engine looking for unique active contexts/vm */ - for (i = 0; i < ARRAY_SIZE(error->engine); i++) { - struct drm_i915_error_engine *ee = &error->engine[i]; - bool found; + ee->hws_page = + i915_error_object_create(i915, + engine->status_page.vma, + compress); - if (!ee->vm) - continue; + ee->wa_ctx = + i915_error_object_create(i915, + engine->wa_ctx.vma, + compress); - found = false; - for (j = 0; j < i && !found; j++) - found = error->engine[j].vm == ee->vm; - if (!found) - gem_capture_vm(error, ee->vm, cnt++); + ee->default_state = + capture_object(i915, engine->default_state, compress); } } -static void capture_pinned_buffers(struct i915_gpu_state *error) -{ - struct i915_address_space *vm = &error->i915->ggtt.vm; - struct drm_i915_error_buffer *bo; - struct i915_vma *vma; - int count; - - count = 0; - list_for_each_entry(vma, &vm->bound_list, vm_link) - count++; - - bo = NULL; - if (count) - bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC); - if (!bo) - return; - - 
error->pinned_bo_count = - capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY); - error->pinned_bo = bo; -} - -static void capture_uc_state(struct i915_gpu_state *error) +static void +capture_uc_state(struct i915_gpu_state *error, struct compress *compress) { struct drm_i915_private *i915 = error->i915; struct i915_error_uc *error_uc = &error->uc; + struct intel_uc *uc = &i915->gt.uc; /* Capturing uC state won't be useful if there is no GuC */ - if (!error->device_info.has_guc) + if (!error->device_info.has_gt_uc) return; - error_uc->guc_fw = i915->guc.fw; - error_uc->huc_fw = i915->huc.fw; + error_uc->guc_fw = uc->guc.fw; + error_uc->huc_fw = uc->huc.fw; /* Non-default firmware paths will be specified by the modparam. * As modparams are generally accesible from the userspace make * explicit copies of the firmware paths. */ - error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC); - error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC); - error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma); + error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); + error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); + error_uc->guc_log = i915_error_object_create(i915, + uc->guc.log.vma, + compress); } /* Capture all registers which don't fit into another category. */ @@ -1756,56 +1698,53 @@ static void capture_finish(struct i915_gpu_state *error) ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); } -static int capture(void *data) -{ - struct i915_gpu_state *error = data; - - error->time = ktime_get_real(); - error->boottime = ktime_get_boottime(); - error->uptime = ktime_sub(ktime_get(), - error->i915->gt.last_init_time); - error->capture = jiffies; - - capture_params(error); - capture_gen_state(error); - capture_uc_state(error); - capture_reg_state(error); - gem_record_fences(error); - gem_record_rings(error); - capture_active_buffers(error); - capture_pinned_buffers(error); - - error->overlay = intel_overlay_capture_error_state(error->i915); - error->display = intel_display_capture_error_state(error->i915); - - error->epoch = capture_find_epoch(error); - - capture_finish(error); - return 0; -} - #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) struct i915_gpu_state * i915_capture_gpu_state(struct drm_i915_private *i915) { struct i915_gpu_state *error; + struct compress compress; /* Check if GPU capture has been disabled */ error = READ_ONCE(i915->gpu_error.first_error); if (IS_ERR(error)) return error; - error = kzalloc(sizeof(*error), GFP_ATOMIC); + error = kzalloc(sizeof(*error), ALLOW_FAIL); if (!error) { i915_disable_error_state(i915, -ENOMEM); return ERR_PTR(-ENOMEM); } + if (!compress_init(&compress)) { + kfree(error); + i915_disable_error_state(i915, -ENOMEM); + return ERR_PTR(-ENOMEM); + } + kref_init(&error->ref); error->i915 = i915; - stop_machine(capture, error, NULL); + error->time = ktime_get_real(); + error->boottime = ktime_get_boottime(); + error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); + error->capture = jiffies; + + capture_params(error); + capture_gen_state(error); + capture_uc_state(error, &compress); + capture_reg_state(error); + gem_record_fences(error); + gem_record_rings(error, &compress); + + error->overlay = intel_overlay_capture_error_state(i915); + error->display = intel_display_capture_error_state(i915); + + error->epoch = capture_find_epoch(error); + + capture_finish(error); + compress_fini(&compress); return error; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h 
b/drivers/gpu/drm/i915/i915_gpu_error.h index 2ecd0c6a1c94..a24c35107d16 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -7,6 +7,7 @@ #ifndef _I915_GPU_ERROR_H_ #define _I915_GPU_ERROR_H_ +#include <linux/atomic.h> #include <linux/kref.h> #include <linux/ktime.h> #include <linux/sched.h> @@ -14,9 +15,9 @@ #include <drm/drm_mm.h> #include "gt/intel_engine.h" +#include "gt/uc/intel_uc_fw.h" #include "intel_device_info.h" -#include "intel_uc_fw.h" #include "i915_gem.h" #include "i915_gem_gtt.h" @@ -84,7 +85,6 @@ struct i915_gpu_state { /* Software tracked state */ bool idle; unsigned long hangcheck_timestamp; - struct i915_address_space *vm; int num_requests; u32 reset_count; @@ -160,32 +160,10 @@ struct i915_gpu_state { } vm_info; } engine[I915_NUM_ENGINES]; - struct drm_i915_error_buffer { - u32 size; - u32 name; - u64 gtt_offset; - u32 read_domains; - u32 write_domain; - s32 fence_reg:I915_MAX_NUM_FENCE_BITS; - u32 tiling:2; - u32 dirty:1; - u32 purgeable:1; - u32 userptr:1; - u32 cache_level:3; - } *active_bo[I915_NUM_ENGINES], *pinned_bo; - u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; - struct i915_address_space *active_vm[I915_NUM_ENGINES]; - struct scatterlist *sgl, *fit; }; struct i915_gpu_error { - /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - - struct delayed_work hangcheck_work; - /* For reset and error_state handling. */ spinlock_t lock; /* Protected by the above dev->gpu_error.lock. */ @@ -193,52 +171,11 @@ struct i915_gpu_error { atomic_t pending_fb_pin; - /** - * flags: Control various stages of the GPU reset - * - * #I915_RESET_BACKOFF - When we start a global reset, we need to - * serialise with any other users attempting to do the same, and - * any global resources that may be clobber by the reset (such as - * FENCE registers). - * - * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to - * acquire the struct_mutex to reset an engine, we need an explicit - * flag to prevent two concurrent reset attempts in the same engine. - * As the number of engines continues to grow, allocate the flags from - * the most significant bits. - * - * #I915_WEDGED - If reset fails and we can no longer use the GPU, - * we set the #I915_WEDGED bit. Prior to command submission, e.g. - * i915_request_alloc(), this bit is checked and the sequence - * aborted (with -EIO reported to userspace) if set. - */ - unsigned long flags; -#define I915_RESET_BACKOFF 0 -#define I915_RESET_MODESET 1 -#define I915_RESET_ENGINE 2 -#define I915_WEDGED (BITS_PER_LONG - 1) - /** Number of times the device has been reset (global) */ - u32 reset_count; + atomic_t reset_count; /** Number of times an engine has been reset */ - u32 reset_engine_count[I915_NUM_ENGINES]; - - struct mutex wedge_mutex; /* serialises wedging/unwedging */ - - /** - * Waitqueue to signal when a hang is detected. Used to for waiters - * to release the struct_mutex for the reset to procede. - */ - wait_queue_head_t wait_queue; - - /** - * Waitqueue to signal when the reset has completed. Used by clients - * that wait for dev_priv->mm.wedged to settle. 
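With the hangcheck timer, reset flags and waitqueues dropped from i915_gpu_error above, what remains are bare counters, now atomic_t so the reset path can bump them without taking any of the removed locks. A rough standalone sketch of that usage, with C11 atomics standing in for the kernel's atomic_t and a hypothetical engine count:

#include <stdatomic.h>

struct gpu_error_counters {
	atomic_uint reset_count;		/* global resets */
	atomic_uint reset_engine_count[8];	/* per-engine resets */
};

static void note_engine_reset(struct gpu_error_counters *e, unsigned int id)
{
	atomic_fetch_add(&e->reset_engine_count[id], 1);
}

static unsigned int engine_resets(struct gpu_error_counters *e, unsigned int id)
{
	return atomic_load(&e->reset_engine_count[id]);
}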
- */ - wait_queue_head_t reset_queue; - - struct srcu_struct reset_backoff_srcu; + atomic_t reset_engine_count[I915_NUM_ENGINES]; }; struct drm_i915_error_state_buf { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b2e27b5b0df9..a17d4fd17962 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -42,6 +42,8 @@ #include "display/intel_lpe_audio.h" #include "display/intel_psr.h" +#include "gt/intel_gt.h" + #include "i915_drv.h" #include "i915_irq.h" #include "i915_trace.h" @@ -264,7 +266,7 @@ static void gen2_irq_init(struct intel_uncore *uncore, gen2_irq_init((uncore), imr_val, ier_val) static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); -static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); +static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir); /* For display hotplug interrupt */ static inline void @@ -305,17 +307,17 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, } static u32 -gen11_gt_engine_identity(struct drm_i915_private * const i915, +gen11_gt_engine_identity(struct intel_gt *gt, const unsigned int bank, const unsigned int bit); -static bool gen11_reset_one_iir(struct drm_i915_private * const i915, +static bool gen11_reset_one_iir(struct intel_gt *gt, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 dw; - lockdep_assert_held(&i915->irq_lock); + lockdep_assert_held(>->i915->irq_lock); dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); if (dw & BIT(bit)) { @@ -323,7 +325,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915, * According to the BSpec, DW_IIR bits cannot be cleared without * first servicing the Selector & Shared IIR registers. */ - gen11_gt_engine_identity(i915, bank, bit); + gen11_gt_engine_identity(gt, bank, bit); /* * We locked GT INT DW by reading it. If we want to (try @@ -409,50 +411,54 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) return INTEL_GEN(dev_priv) >= 8 ? 
GEN8_GT_IIR(2) : GEN6_PMIIR; } -static void write_pm_imr(struct drm_i915_private *dev_priv) +static void write_pm_imr(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 mask = gt->pm_imr; i915_reg_t reg; - u32 mask = dev_priv->pm_imr; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { reg = GEN11_GPM_WGBOXPERF_INTR_MASK; /* pm is in upper half */ mask = mask << 16; - } else if (INTEL_GEN(dev_priv) >= 8) { + } else if (INTEL_GEN(i915) >= 8) { reg = GEN8_GT_IMR(2); } else { reg = GEN6_PMIMR; } - I915_WRITE(reg, mask); - POSTING_READ(reg); + intel_uncore_write(uncore, reg, mask); + intel_uncore_posting_read(uncore, reg); } -static void write_pm_ier(struct drm_i915_private *dev_priv) +static void write_pm_ier(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 mask = gt->pm_ier; i915_reg_t reg; - u32 mask = dev_priv->pm_ier; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE; /* pm is in upper half */ mask = mask << 16; - } else if (INTEL_GEN(dev_priv) >= 8) { + } else if (INTEL_GEN(i915) >= 8) { reg = GEN8_GT_IER(2); } else { reg = GEN6_PMIER; } - I915_WRITE(reg, mask); + intel_uncore_write(uncore, reg, mask); } /** * snb_update_pm_irq - update GEN6_PMIMR - * @dev_priv: driver private + * @gt: gt for the interrupts * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -static void snb_update_pm_irq(struct drm_i915_private *dev_priv, +static void snb_update_pm_irq(struct intel_gt *gt, u32 interrupt_mask, u32 enabled_irq_mask) { @@ -460,37 +466,37 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, WARN_ON(enabled_irq_mask & ~interrupt_mask); - lockdep_assert_held(&dev_priv->irq_lock); + lockdep_assert_held(>->i915->irq_lock); - new_val = dev_priv->pm_imr; + new_val = gt->pm_imr; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->pm_imr) { - dev_priv->pm_imr = new_val; - write_pm_imr(dev_priv); + if (new_val != gt->pm_imr) { + gt->pm_imr = new_val; + write_pm_imr(gt); } } -void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) +void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask) { - if (WARN_ON(!intel_irqs_enabled(dev_priv))) + if (WARN_ON(!intel_irqs_enabled(gt->i915))) return; - snb_update_pm_irq(dev_priv, mask, mask); + snb_update_pm_irq(gt, mask, mask); } -static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) +static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask) { - snb_update_pm_irq(dev_priv, mask, 0); + snb_update_pm_irq(gt, mask, 0); } -void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) +void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask) { - if (WARN_ON(!intel_irqs_enabled(dev_priv))) + if (WARN_ON(!intel_irqs_enabled(gt->i915))) return; - __gen6_mask_pm_irq(dev_priv, mask); + __gen6_mask_pm_irq(gt, mask); } static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) @@ -504,23 +510,23 @@ static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) POSTING_READ(reg); } -static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) +static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask) { - lockdep_assert_held(&dev_priv->irq_lock); + lockdep_assert_held(>->i915->irq_lock); - dev_priv->pm_ier |= enable_mask; - write_pm_ier(dev_priv); 
- gen6_unmask_pm_irq(dev_priv, enable_mask); + gt->pm_ier |= enable_mask; + write_pm_ier(gt); + gen6_unmask_pm_irq(gt, enable_mask); /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ } -static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) +static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask) { - lockdep_assert_held(&dev_priv->irq_lock); + lockdep_assert_held(>->i915->irq_lock); - dev_priv->pm_ier &= ~disable_mask; - __gen6_mask_pm_irq(dev_priv, disable_mask); - write_pm_ier(dev_priv); + gt->pm_ier &= ~disable_mask; + __gen6_mask_pm_irq(gt, disable_mask); + write_pm_ier(gt); /* though a barrier is missing here, but don't really need a one */ } @@ -528,7 +534,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) { spin_lock_irq(&dev_priv->irq_lock); - while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)) + while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM)) ; dev_priv->gt_pm.rps.pm_iir = 0; @@ -546,6 +552,7 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) { + struct intel_gt *gt = &dev_priv->gt; struct intel_rps *rps = &dev_priv->gt_pm.rps; if (READ_ONCE(rps->interrupts_enabled)) @@ -555,12 +562,12 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) WARN_ON_ONCE(rps->pm_iir); if (INTEL_GEN(dev_priv) >= 11) - WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)); + WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM)); else WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); rps->interrupts_enabled = true; - gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_enable_pm_irq(gt, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); } @@ -577,10 +584,10 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); - gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); + gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); /* Now that we will not be generating any more work, flush any * outstanding tasks. 
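The GuC interrupt helpers below now take a struct intel_guc and climb back to the enclosing GT with guc_to_gt(). That navigation without a back-pointer is the usual container_of() idiom; a standalone sketch of the mechanism with simplified stand-in structs (the real i915 nesting runs through more levels than shown here):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct guc { int interrupts_enabled; };

struct gt {
	void *i915;
	struct guc guc;		/* embedded by value, not a pointer */
};

static struct gt *guc_to_gt(struct guc *guc)
{
	/* step back by the member offset to recover the container */
	return container_of(guc, struct gt, guc);
}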
As we are called on the RPS idle path, @@ -594,78 +601,94 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) gen6_reset_rps_interrupts(dev_priv); } -void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) +void gen9_reset_guc_interrupts(struct intel_guc *guc) { - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - spin_lock_irq(&dev_priv->irq_lock); - gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); - spin_unlock_irq(&dev_priv->irq_lock); + assert_rpm_wakelock_held(&i915->runtime_pm); + + spin_lock_irq(&i915->irq_lock); + gen6_reset_pm_iir(i915, gt->pm_guc_events); + spin_unlock_irq(&i915->irq_lock); } -void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen9_enable_guc_interrupts(struct intel_guc *guc) { - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - spin_lock_irq(&dev_priv->irq_lock); - if (!dev_priv->guc.interrupts.enabled) { - WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & - dev_priv->pm_guc_events); - dev_priv->guc.interrupts.enabled = true; - gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); + assert_rpm_wakelock_held(&i915->runtime_pm); + + spin_lock_irq(&i915->irq_lock); + if (!guc->interrupts.enabled) { + WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) & + gt->pm_guc_events); + guc->interrupts.enabled = true; + gen6_enable_pm_irq(gt, gt->pm_guc_events); } - spin_unlock_irq(&dev_priv->irq_lock); + spin_unlock_irq(&i915->irq_lock); } -void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen9_disable_guc_interrupts(struct intel_guc *guc) { - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->guc.interrupts.enabled = false; + assert_rpm_wakelock_held(&i915->runtime_pm); - gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); + spin_lock_irq(&i915->irq_lock); + guc->interrupts.enabled = false; - spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev_priv->drm.irq); + gen6_disable_pm_irq(gt, gt->pm_guc_events); - gen9_reset_guc_interrupts(dev_priv); + spin_unlock_irq(&i915->irq_lock); + intel_synchronize_irq(i915); + + gen9_reset_guc_interrupts(guc); } -void gen11_reset_guc_interrupts(struct drm_i915_private *i915) +void gen11_reset_guc_interrupts(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; + spin_lock_irq(&i915->irq_lock); - gen11_reset_one_iir(i915, 0, GEN11_GUC); + gen11_reset_one_iir(gt, 0, GEN11_GUC); spin_unlock_irq(&i915->irq_lock); } -void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen11_enable_guc_interrupts(struct intel_guc *guc) { - spin_lock_irq(&dev_priv->irq_lock); - if (!dev_priv->guc.interrupts.enabled) { - u32 events = REG_FIELD_PREP(ENGINE1_MASK, - GEN11_GUC_INTR_GUC2HOST); - - WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC)); - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events); - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events); - dev_priv->guc.interrupts.enabled = true; + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(>->i915->irq_lock); + if (!guc->interrupts.enabled) { + u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); + + WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC)); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events); + 
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events); + guc->interrupts.enabled = true; } - spin_unlock_irq(&dev_priv->irq_lock); + spin_unlock_irq(>->i915->irq_lock); } -void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen11_disable_guc_interrupts(struct intel_guc *guc) { - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->guc.interrupts.enabled = false; + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); + spin_lock_irq(&i915->irq_lock); + guc->interrupts.enabled = false; - spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev_priv->drm.irq); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); - gen11_reset_guc_interrupts(dev_priv); + spin_unlock_irq(&i915->irq_lock); + intel_synchronize_irq(i915); + + gen11_reset_guc_interrupts(guc); } /** @@ -924,11 +947,12 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ -static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +u32 i915_get_vblank_counter(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; const struct drm_display_mode *mode = &vblank->hwmode; + enum pipe pipe = to_intel_crtc(crtc)->pipe; i915_reg_t high_frame, low_frame; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; unsigned long irqflags; @@ -989,9 +1013,10 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; } -static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +u32 g4x_get_vblank_counter(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } @@ -1107,10 +1132,10 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + crtc->scanline_offset) % vtotal; } -static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) +bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, @@ -1424,7 +1449,7 @@ out: /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ spin_lock_irq(&dev_priv->irq_lock); if (rps->interrupts_enabled) - gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); } @@ -1637,7 +1662,7 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { gen6_rps_irq_handler(i915, gt_iir[2]); - gen9_guc_irq_handler(i915, gt_iir[2]); + guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16); } } @@ -1891,8 +1916,9 @@ static void 
i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, /* The RPS events need forcewake, so we add them to a work queue and mask their * IMR bits until the work is done. Other interrupts can be processed without * the work queue. */ -static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir) +static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir) { + struct drm_i915_private *i915 = gt->i915; struct intel_rps *rps = &i915->gt_pm.rps; const u32 events = i915->pm_rps_events & pm_iir; @@ -1901,7 +1927,7 @@ static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir) if (unlikely(!events)) return; - gen6_mask_pm_irq(i915, events); + gen6_mask_pm_irq(gt, events); if (!rps->interrupts_enabled) return; @@ -1916,7 +1942,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); - gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + gen6_mask_pm_irq(&dev_priv->gt, + pm_iir & dev_priv->pm_rps_events); if (rps->interrupts_enabled) { rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; schedule_work(&rps->work); @@ -1934,16 +1961,10 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); } -static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) +static void guc_irq_handler(struct intel_guc *guc, u16 iir) { - if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) - intel_guc_to_host_event_handler(&dev_priv->guc); -} - -static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir) -{ - if (iir & GEN11_GUC_INTR_GUC2HOST) - intel_guc_to_host_event_handler(&i915->guc); + if (iir & GUC_INTR_GUC2HOST) + intel_guc_to_host_event_handler(guc); } static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) @@ -2185,8 +2206,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t valleyview_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -2271,8 +2291,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) static irqreturn_t cherryview_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -2691,8 +2710,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, */ static irqreturn_t ironlake_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; @@ -2826,6 +2844,14 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) return mask; } +static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 9) + return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; +} + static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { @@ -2938,12 +2964,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - fault_errors = iir; - if (INTEL_GEN(dev_priv) >= 9) 
- fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; - else - fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - + fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); if (fault_errors) DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", pipe_name(pipe), @@ -3002,7 +3023,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs) static irqreturn_t gen8_irq_handler(int irq, void *arg) { - struct drm_i915_private *dev_priv = to_i915(arg); + struct drm_i915_private *dev_priv = arg; void __iomem * const regs = dev_priv->uncore.regs; u32 master_ctl; u32 gt_iir[4]; @@ -3034,14 +3055,14 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } static u32 -gen11_gt_engine_identity(struct drm_i915_private * const i915, +gen11_gt_engine_identity(struct intel_gt *gt, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 timeout_ts; u32 ident; - lockdep_assert_held(&i915->irq_lock); + lockdep_assert_held(>->i915->irq_lock); raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); @@ -3068,27 +3089,27 @@ gen11_gt_engine_identity(struct drm_i915_private * const i915, } static void -gen11_other_irq_handler(struct drm_i915_private * const i915, - const u8 instance, const u16 iir) +gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, + const u16 iir) { if (instance == OTHER_GUC_INSTANCE) - return gen11_guc_irq_handler(i915, iir); + return guc_irq_handler(>->uc.guc, iir); if (instance == OTHER_GTPM_INSTANCE) - return gen11_rps_irq_handler(i915, iir); + return gen11_rps_irq_handler(gt, iir); WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", instance, iir); } static void -gen11_engine_irq_handler(struct drm_i915_private * const i915, - const u8 class, const u8 instance, const u16 iir) +gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, + const u8 instance, const u16 iir) { struct intel_engine_cs *engine; if (instance <= MAX_ENGINE_INSTANCE) - engine = i915->engine_class[class][instance]; + engine = gt->i915->engine_class[class][instance]; else engine = NULL; @@ -3100,8 +3121,7 @@ gen11_engine_irq_handler(struct drm_i915_private * const i915, } static void -gen11_gt_identity_handler(struct drm_i915_private * const i915, - const u32 identity) +gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity) { const u8 class = GEN11_INTR_ENGINE_CLASS(identity); const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); @@ -3111,31 +3131,30 @@ gen11_gt_identity_handler(struct drm_i915_private * const i915, return; if (class <= COPY_ENGINE_CLASS) - return gen11_engine_irq_handler(i915, class, instance, intr); + return gen11_engine_irq_handler(gt, class, instance, intr); if (class == OTHER_CLASS) - return gen11_other_irq_handler(i915, instance, intr); + return gen11_other_irq_handler(gt, instance, intr); WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", class, instance, intr); } static void -gen11_gt_bank_handler(struct drm_i915_private * const i915, - const unsigned int bank) +gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; unsigned long intr_dw; unsigned int bit; - lockdep_assert_held(&i915->irq_lock); + lockdep_assert_held(>->i915->irq_lock); intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); for_each_set_bit(bit, &intr_dw, 32) { - const u32 ident = gen11_gt_engine_identity(i915, bank, bit); + const u32 ident = 
gen11_gt_engine_identity(gt, bank, bit); - gen11_gt_identity_handler(i915, ident); + gen11_gt_identity_handler(gt, ident); } /* Clear must be after shared has been served for engine */ @@ -3143,25 +3162,25 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915, } static void -gen11_gt_irq_handler(struct drm_i915_private * const i915, - const u32 master_ctl) +gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) { + struct drm_i915_private *i915 = gt->i915; unsigned int bank; spin_lock(&i915->irq_lock); for (bank = 0; bank < 2; bank++) { if (master_ctl & GEN11_GT_DW_IRQ(bank)) - gen11_gt_bank_handler(i915, bank); + gen11_gt_bank_handler(gt, bank); } spin_unlock(&i915->irq_lock); } static u32 -gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) +gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) { - void __iomem * const regs = dev_priv->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -3175,10 +3194,10 @@ gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) } static void -gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) +gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) { if (iir & GEN11_GU_MISC_GSE) - intel_opregion_asle_intr(dev_priv); + intel_opregion_asle_intr(gt->i915); } static inline u32 gen11_master_intr_disable(void __iomem * const regs) @@ -3201,8 +3220,9 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) static irqreturn_t gen11_irq_handler(int irq, void *arg) { - struct drm_i915_private * const i915 = to_i915(arg); + struct drm_i915_private * const i915 = arg; void __iomem * const regs = i915->uncore.regs; + struct intel_gt *gt = &i915->gt; u32 master_ctl; u32 gu_misc_iir; @@ -3216,7 +3236,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) } /* Find, clear, then process each source of interrupt. 
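Note: the gen11 GT interrupt path above walks each interrupt bank, iterates the set bits of the bank's INTR_DW word, fetches an identity value per bit and decodes engine class/instance from it before dispatching. The sketch below models just that decode-and-dispatch loop in plain C; the bit layout and helper names are assumptions invented for the example, not the GEN11_INTR_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative identity layout: low 16 bits = events, then instance, class. */
#define IDENT_INTR(x)     ((x) & 0xffff)
#define IDENT_INSTANCE(x) (((x) >> 16) & 0xff)
#define IDENT_CLASS(x)    (((x) >> 24) & 0xff)

static void dispatch(unsigned int class, unsigned int instance, unsigned int intr)
{
        printf("class=%u instance=%u intr=%#x\n", class, instance, intr);
}

/* Walk the set bits of one bank's status word and decode each source. */
static void handle_bank(uint32_t intr_dw, const uint32_t *identity)
{
        unsigned int bit;

        for (bit = 0; bit < 32; bit++) {
                uint32_t ident;

                if (!(intr_dw & (1u << bit)))
                        continue;

                ident = identity[bit];  /* read the per-bit identity register */
                dispatch(IDENT_CLASS(ident), IDENT_INSTANCE(ident),
                         IDENT_INTR(ident));
        }
}

int main(void)
{
        uint32_t identity[32] = { 0 };

        identity[3] = (1u << 24) | 0x0001;  /* class 1, instance 0, one event */
        handle_bank(1u << 3, identity);
        return 0;
}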
*/ - gen11_gt_irq_handler(i915, master_ctl); + gen11_gt_irq_handler(gt, master_ctl); /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & GEN11_DISPLAY_IRQ) { @@ -3231,11 +3251,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) enable_rpm_wakeref_asserts(&i915->runtime_pm); } - gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); gen11_master_intr_enable(regs); - gen11_gu_misc_irq_handler(i915, gu_misc_iir); + gen11_gu_misc_irq_handler(gt, gu_misc_iir); return IRQ_HANDLED; } @@ -3243,9 +3263,10 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ -static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) +int i8xx_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3255,19 +3276,20 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) return 0; } -static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe) +int i945gm_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); if (dev_priv->i945gm_vblank.enabled++ == 0) schedule_work(&dev_priv->i945gm_vblank.work); - return i8xx_enable_vblank(dev, pipe); + return i8xx_enable_vblank(crtc); } -static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) +int i965_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3278,9 +3300,10 @@ static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) return 0; } -static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) +int ilk_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; u32 bit = INTEL_GEN(dev_priv) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -3293,14 +3316,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) * PSR is active as no frames are generated. */ if (HAS_PSR(dev_priv)) - drm_vblank_restore(dev, pipe); + drm_crtc_vblank_restore(crtc); return 0; } -static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) +int bdw_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3311,7 +3335,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) * PSR is active as no frames are generated, so check only for PSR. 
*/ if (HAS_PSR(dev_priv)) - drm_vblank_restore(dev, pipe); + drm_crtc_vblank_restore(crtc); return 0; } @@ -3319,9 +3343,10 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ -static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) +void i8xx_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3329,19 +3354,20 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe) +void i945gm_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); - i8xx_disable_vblank(dev, pipe); + i8xx_disable_vblank(crtc); if (--dev_priv->i945gm_vblank.enabled == 0) schedule_work(&dev_priv->i945gm_vblank.work); } -static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) +void i965_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3350,9 +3376,10 @@ static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) +void ilk_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; u32 bit = INTEL_GEN(dev_priv) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -3362,9 +3389,10 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) +void bdw_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3447,10 +3475,8 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) * * This function needs to be called before interrupts are enabled. 
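Note: with the drm_crtc based hooks above, each vblank callback is handed the crtc and derives everything else from it (to_i915(crtc->dev) for the device, to_intel_crtc(crtc)->pipe for the pipe) instead of receiving a bare pipe index. A reduced standalone model of that derivation, using illustrative types rather than the drm/i915 ones:

#include <stdio.h>

struct fake_device { const char *name; };

struct fake_crtc {
        struct fake_device *dev;
        int pipe;               /* which display pipe this crtc drives */
};

/* The hook receives only the crtc and recovers device and pipe from it. */
static int enable_vblank(struct fake_crtc *crtc)
{
        struct fake_device *dev = crtc->dev;
        int pipe = crtc->pipe;

        printf("%s: enable vblank on pipe %d\n", dev->name, pipe);
        return 0;
}

static void disable_vblank(struct fake_crtc *crtc)
{
        printf("%s: disable vblank on pipe %d\n", crtc->dev->name, crtc->pipe);
}

int main(void)
{
        struct fake_device dev = { .name = "card0" };
        struct fake_crtc crtc = { .dev = &dev, .pipe = 1 };

        enable_vblank(&crtc);
        disable_vblank(&crtc);
        return 0;
}

A side benefit visible in the hunks above is that crtc-level helpers such as drm_crtc_vblank_restore() can then be called directly on the object the hook already holds.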
*/ -static void ibx_irq_pre_postinstall(struct drm_device *dev) +static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (HAS_PCH_NOP(dev_priv)) return; @@ -3473,12 +3499,12 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; if (IS_CHERRYVIEW(dev_priv)) - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); i9xx_pipestat_irq_reset(dev_priv); @@ -3519,18 +3545,17 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) /* drm_dma.h hooks */ -static void ironlake_irq_reset(struct drm_device *dev) +static void ironlake_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; GEN3_IRQ_RESET(uncore, DE); if (IS_GEN(dev_priv, 7)) - I915_WRITE(GEN7_ERR_INT, 0xffffffff); + intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); if (IS_HASWELL(dev_priv)) { - I915_WRITE(EDP_PSR_IMR, 0xffffffff); - I915_WRITE(EDP_PSR_IIR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); } gen5_gt_irq_reset(dev_priv); @@ -3538,10 +3563,8 @@ static void ironlake_irq_reset(struct drm_device *dev) ibx_irq_reset(dev_priv); } -static void valleyview_irq_reset(struct drm_device *dev) +static void valleyview_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - I915_WRITE(VLV_MASTER_IER, 0); POSTING_READ(VLV_MASTER_IER); @@ -3563,9 +3586,8 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) GEN8_IRQ_RESET_NDX(uncore, GT, 3); } -static void gen8_irq_reset(struct drm_device *dev) +static void gen8_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; int pipe; @@ -3573,8 +3595,8 @@ static void gen8_irq_reset(struct drm_device *dev) gen8_gt_irq_reset(dev_priv); - I915_WRITE(EDP_PSR_IMR, 0xffffffff); - I915_WRITE(EDP_PSR_IIR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, @@ -3589,39 +3611,40 @@ static void gen8_irq_reset(struct drm_device *dev) ibx_irq_reset(dev_priv); } -static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) +static void gen11_gt_irq_reset(struct intel_gt *gt) { + struct intel_uncore *uncore = gt->uncore; + /* Disable RCS, BCS, VCS and VECS class engines. */ - I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); - I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0); /* Restore masks irqs on RCS, BCS, VCS and VECS engines. 
*/ - I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); - I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); - I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); - I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); - I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0); - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); } -static void gen11_irq_reset(struct drm_device *dev) +static void gen11_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_uncore *uncore = &dev_priv->uncore; int pipe; gen11_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(dev_priv); + gen11_gt_irq_reset(&dev_priv->gt); - I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); + intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); - I915_WRITE(EDP_PSR_IMR, 0xffffffff); - I915_WRITE(EDP_PSR_IIR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, @@ -3680,12 +3703,11 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); } -static void cherryview_irq_reset(struct drm_device *dev) +static void cherryview_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; I915_WRITE(GEN8_MASTER_IRQ, 0); @@ -3950,9 +3972,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) __bxt_hpd_detection_setup(dev_priv, enabled_irqs); } -static void ibx_irq_postinstall(struct drm_device *dev) +static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; if (HAS_PCH_NOP(dev_priv)) @@ -3975,9 +3996,8 @@ static void ibx_irq_postinstall(struct drm_device *dev) spt_hpd_detection_setup(dev_priv); } -static void gen5_gt_irq_postinstall(struct drm_device *dev) +static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 pm_irqs, gt_irqs; @@ -4006,17 +4026,16 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) */ if (HAS_ENGINE(dev_priv, VECS0)) { pm_irqs |= PM_VEBOX_USER_INTERRUPT; - dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; + dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT; } - dev_priv->pm_imr = 0xffffffff; - GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs); + dev_priv->gt.pm_imr = 0xffffffff; + GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs); } } -static int ironlake_irq_postinstall(struct drm_device *dev) +static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private 
*dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 display_mask, extra_mask; @@ -4043,16 +4062,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev) dev_priv->irq_mask = ~display_mask; - ibx_irq_pre_postinstall(dev); + ibx_irq_pre_postinstall(dev_priv); GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, display_mask | extra_mask); - gen5_gt_irq_postinstall(dev); + gen5_gt_irq_postinstall(dev_priv); ilk_hpd_detection_setup(dev_priv); - ibx_irq_postinstall(dev); + ibx_irq_postinstall(dev_priv); if (IS_IRONLAKE_M(dev_priv)) { /* Enable PCU event interrupts @@ -4064,8 +4083,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev) ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); spin_unlock_irq(&dev_priv->irq_lock); } - - return 0; } void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) @@ -4097,11 +4114,9 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) } -static int valleyview_irq_postinstall(struct drm_device *dev) +static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - gen5_gt_irq_postinstall(dev); + gen5_gt_irq_postinstall(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) @@ -4110,13 +4125,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); POSTING_READ(VLV_MASTER_IER); - - return 0; } -static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) +static void gen8_gt_irq_postinstall(struct drm_i915_private *i915) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &i915->gt; + struct intel_uncore *uncore = gt->uncore; /* These are interrupts we'll toggle with the ring mask register */ u32 gt_interrupts[] = { @@ -4136,15 +4150,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) }; - dev_priv->pm_ier = 0x0; - dev_priv->pm_imr = ~dev_priv->pm_ier; + gt->pm_ier = 0x0; + gt->pm_imr = ~gt->pm_ier; GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]); GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]); /* * RPS interrupts will get enabled/disabled on demand when RPS itself * is enabled/disabled. Same wil be the case for GuC interrupts. 
*/ - GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); + GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier); GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); } @@ -4218,58 +4232,56 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) } } -static int gen8_irq_postinstall(struct drm_device *dev) +static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_pre_postinstall(dev); + ibx_irq_pre_postinstall(dev_priv); gen8_gt_irq_postinstall(dev_priv); gen8_de_irq_postinstall(dev_priv); if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_postinstall(dev); + ibx_irq_postinstall(dev_priv); gen8_master_intr_enable(dev_priv->uncore.regs); - - return 0; } -static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) +static void gen11_gt_irq_postinstall(struct intel_gt *gt) { const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; + struct intel_uncore *uncore = gt->uncore; + const u32 dmask = irqs << 16 | irqs; + const u32 smask = irqs << 16; BUILD_BUG_ON(irqs & 0xffff0000); /* Enable RCS, BCS, VCS and VECS class interrupts. */ - I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); - I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); + intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask); + intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask); /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ - I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); - I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); - I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); - I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); - I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); + intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask); + intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask); + intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask); + intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask); + intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask); /* * RPS interrupts will get enabled/disabled on demand when RPS itself * is enabled/disabled. 
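Note: dmask and smask in gen11_gt_irq_postinstall() above encode the convention that each 32-bit engine mask register holds two 16-bit engine fields: the dual-engine registers are written with both halves (dmask), while the *_RSVD_* registers only use the upper half (smask). A small worked example follows; the event bits are placeholders, not the real register values from i915_reg.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Placeholder user + context-switch event bits (illustrative). */
        const uint32_t irqs = 0x0101;

        /* Mirrors the BUILD_BUG_ON(): events must fit in one 16-bit half. */
        assert((irqs & 0xffff0000) == 0);

        const uint32_t dmask = irqs << 16 | irqs;  /* both engine halves */
        const uint32_t smask = irqs << 16;         /* upper half only    */

        printf("enable (dmask)         = %08x\n", dmask);
        printf("unmask dual   (~dmask) = %08x\n", ~dmask);
        printf("unmask single (~smask) = %08x\n", ~smask);
        return 0;
}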
*/ - dev_priv->pm_ier = 0x0; - dev_priv->pm_imr = ~dev_priv->pm_ier; - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + gt->pm_ier = 0x0; + gt->pm_imr = ~gt->pm_ier; + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); /* Same thing for GuC interrupts */ - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); } -static void icp_irq_postinstall(struct drm_device *dev) +static void icp_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 mask = SDE_GMBUS_ICP; WARN_ON(I915_READ(SDEIER) != 0); @@ -4282,32 +4294,27 @@ static void icp_irq_postinstall(struct drm_device *dev) icp_hpd_detection_setup(dev_priv); } -static int gen11_irq_postinstall(struct drm_device *dev) +static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_uncore *uncore = &dev_priv->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_postinstall(dev); + icp_irq_postinstall(dev_priv); - gen11_gt_irq_postinstall(dev_priv); + gen11_gt_irq_postinstall(&dev_priv->gt); gen8_de_irq_postinstall(dev_priv); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - gen11_master_intr_enable(dev_priv->uncore.regs); + gen11_master_intr_enable(uncore->regs); POSTING_READ(GEN11_GFX_MSTR_IRQ); - - return 0; } -static int cherryview_irq_postinstall(struct drm_device *dev) +static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - gen8_gt_irq_postinstall(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -4317,13 +4324,10 @@ static int cherryview_irq_postinstall(struct drm_device *dev) I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); POSTING_READ(GEN8_MASTER_IRQ); - - return 0; } -static void i8xx_irq_reset(struct drm_device *dev) +static void i8xx_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; i9xx_pipestat_irq_reset(dev_priv); @@ -4331,9 +4335,8 @@ static void i8xx_irq_reset(struct drm_device *dev) GEN2_IRQ_RESET(uncore); } -static int i8xx_irq_postinstall(struct drm_device *dev) +static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u16 enable_mask; @@ -4362,8 +4365,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); - - return 0; } static void i8xx_error_irq_ack(struct drm_i915_private *i915, @@ -4444,8 +4445,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t i8xx_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -4488,9 +4488,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) return ret; } -static void 
i915_irq_reset(struct drm_device *dev) +static void i915_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; if (I915_HAS_HOTPLUG(dev_priv)) { @@ -4503,9 +4502,8 @@ static void i915_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(uncore, GEN2_); } -static int i915_irq_postinstall(struct drm_device *dev) +static void i915_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; @@ -4543,14 +4541,11 @@ static int i915_irq_postinstall(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); i915_enable_asle_pipestat(dev_priv); - - return 0; } static irqreturn_t i915_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -4601,9 +4596,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) return ret; } -static void i965_irq_reset(struct drm_device *dev) +static void i965_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); @@ -4614,9 +4608,8 @@ static void i965_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(uncore, GEN2_); } -static int i965_irq_postinstall(struct drm_device *dev) +static void i965_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; u32 error_mask; @@ -4666,8 +4659,6 @@ static int i965_irq_postinstall(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); i915_enable_asle_pipestat(dev_priv); - - return 0; } static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -4697,8 +4688,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) static irqreturn_t i965_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -4775,8 +4765,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; - if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11) - dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; + /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ + if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11) + dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; /* Let's track the enabled rps events */ if (IS_VALLEYVIEW(dev_priv)) @@ -4805,11 +4796,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 8) rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) - dev->driver->get_vblank_counter = g4x_get_vblank_counter; - else if (INTEL_GEN(dev_priv) >= 3) - dev->driver->get_vblank_counter = i915_get_vblank_counter; - dev->vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on @@ -4831,86 +4817,18 @@ void intel_irq_init(struct drm_i915_private *dev_priv) */ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); - dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; - 
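Note: the hunks that follow drop the per-platform drm_driver hook assignments and the drm_irq_install() path; platform selection moves into local intel_irq_handler()/intel_irq_reset()/intel_irq_postinstall() helpers and the driver calls request_irq()/free_irq() itself with IRQF_SHARED. The kernel-style sketch below shows only the resulting install/uninstall ordering (reset, request the line, then enable sources) with an invented my_* device; it is a sketch of the pattern, not a complete module and not the i915 code.

#include <linux/types.h>
#include <linux/interrupt.h>

struct my_dev { bool irq_enabled; };      /* illustrative driver state */

static void my_hw_reset(struct my_dev *dev) { /* program all IER/IMR to idle */ }
static void my_hw_postinstall(struct my_dev *dev) { /* enable wanted sources */ }

static irqreturn_t my_irq_handler(int irq, void *arg)
{
        struct my_dev *dev = arg;         /* cookie passed to request_irq() */

        /* Claim the (shared) line only while our interrupts are enabled. */
        return dev->irq_enabled ? IRQ_HANDLED : IRQ_NONE;
}

static int my_irq_install(struct my_dev *dev, int irq)
{
        int ret;

        dev->irq_enabled = true;

        /* Quiesce all interrupt sources before the line can fire. */
        my_hw_reset(dev);

        ret = request_irq(irq, my_irq_handler, IRQF_SHARED, "my-driver", dev);
        if (ret < 0) {
                dev->irq_enabled = false;
                return ret;
        }

        /* Only now unmask the sources we actually want. */
        my_hw_postinstall(dev);
        return 0;
}

static void my_irq_uninstall(struct my_dev *dev, int irq)
{
        if (!dev->irq_enabled)
                return;

        dev->irq_enabled = false;
        my_hw_reset(dev);
        free_irq(irq, dev);
}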
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; - - if (IS_CHERRYVIEW(dev_priv)) { - dev->driver->irq_handler = cherryview_irq_handler; - dev->driver->irq_preinstall = cherryview_irq_reset; - dev->driver->irq_postinstall = cherryview_irq_postinstall; - dev->driver->irq_uninstall = cherryview_irq_reset; - dev->driver->enable_vblank = i965_enable_vblank; - dev->driver->disable_vblank = i965_disable_vblank; - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_VALLEYVIEW(dev_priv)) { - dev->driver->irq_handler = valleyview_irq_handler; - dev->driver->irq_preinstall = valleyview_irq_reset; - dev->driver->irq_postinstall = valleyview_irq_postinstall; - dev->driver->irq_uninstall = valleyview_irq_reset; - dev->driver->enable_vblank = i965_enable_vblank; - dev->driver->disable_vblank = i965_disable_vblank; - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (INTEL_GEN(dev_priv) >= 11) { - dev->driver->irq_handler = gen11_irq_handler; - dev->driver->irq_preinstall = gen11_irq_reset; - dev->driver->irq_postinstall = gen11_irq_postinstall; - dev->driver->irq_uninstall = gen11_irq_reset; - dev->driver->enable_vblank = gen8_enable_vblank; - dev->driver->disable_vblank = gen8_disable_vblank; - dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; - } else if (INTEL_GEN(dev_priv) >= 8) { - dev->driver->irq_handler = gen8_irq_handler; - dev->driver->irq_preinstall = gen8_irq_reset; - dev->driver->irq_postinstall = gen8_irq_postinstall; - dev->driver->irq_uninstall = gen8_irq_reset; - dev->driver->enable_vblank = gen8_enable_vblank; - dev->driver->disable_vblank = gen8_disable_vblank; - if (IS_GEN9_LP(dev_priv)) + if (HAS_GMCH(dev_priv)) { + if (I915_HAS_HOTPLUG(dev_priv)) + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } else { + if (INTEL_GEN(dev_priv) >= 11) + dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; + else if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; else dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; - } else if (HAS_PCH_SPLIT(dev_priv)) { - dev->driver->irq_handler = ironlake_irq_handler; - dev->driver->irq_preinstall = ironlake_irq_reset; - dev->driver->irq_postinstall = ironlake_irq_postinstall; - dev->driver->irq_uninstall = ironlake_irq_reset; - dev->driver->enable_vblank = ironlake_enable_vblank; - dev->driver->disable_vblank = ironlake_disable_vblank; - dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; - } else { - if (IS_GEN(dev_priv, 2)) { - dev->driver->irq_preinstall = i8xx_irq_reset; - dev->driver->irq_postinstall = i8xx_irq_postinstall; - dev->driver->irq_handler = i8xx_irq_handler; - dev->driver->irq_uninstall = i8xx_irq_reset; - dev->driver->enable_vblank = i8xx_enable_vblank; - dev->driver->disable_vblank = i8xx_disable_vblank; - } else if (IS_I945GM(dev_priv)) { - dev->driver->irq_preinstall = i915_irq_reset; - dev->driver->irq_postinstall = i915_irq_postinstall; - dev->driver->irq_uninstall = i915_irq_reset; - dev->driver->irq_handler = i915_irq_handler; - dev->driver->enable_vblank = i945gm_enable_vblank; - dev->driver->disable_vblank = i945gm_disable_vblank; - } else if (IS_GEN(dev_priv, 3)) { - dev->driver->irq_preinstall = i915_irq_reset; - dev->driver->irq_postinstall = i915_irq_postinstall; - dev->driver->irq_uninstall = i915_irq_reset; - dev->driver->irq_handler = i915_irq_handler; - dev->driver->enable_vblank = i8xx_enable_vblank; - dev->driver->disable_vblank = 
i8xx_disable_vblank; - } else { - dev->driver->irq_preinstall = i965_irq_reset; - dev->driver->irq_postinstall = i965_irq_postinstall; - dev->driver->irq_uninstall = i965_irq_reset; - dev->driver->irq_handler = i965_irq_handler; - dev->driver->enable_vblank = i965_enable_vblank; - dev->driver->disable_vblank = i965_disable_vblank; - } - if (I915_HAS_HOTPLUG(dev_priv)) - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } } @@ -4931,6 +4849,75 @@ void intel_irq_fini(struct drm_i915_private *i915) kfree(i915->l3_parity.remap_info[i]); } +static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) +{ + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + return cherryview_irq_handler; + else if (IS_VALLEYVIEW(dev_priv)) + return valleyview_irq_handler; + else if (IS_GEN(dev_priv, 4)) + return i965_irq_handler; + else if (IS_GEN(dev_priv, 3)) + return i915_irq_handler; + else + return i8xx_irq_handler; + } else { + if (INTEL_GEN(dev_priv) >= 11) + return gen11_irq_handler; + else if (INTEL_GEN(dev_priv) >= 8) + return gen8_irq_handler; + else + return ironlake_irq_handler; + } +} + +static void intel_irq_reset(struct drm_i915_private *dev_priv) +{ + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + cherryview_irq_reset(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_irq_reset(dev_priv); + else if (IS_GEN(dev_priv, 4)) + i965_irq_reset(dev_priv); + else if (IS_GEN(dev_priv, 3)) + i915_irq_reset(dev_priv); + else + i8xx_irq_reset(dev_priv); + } else { + if (INTEL_GEN(dev_priv) >= 11) + gen11_irq_reset(dev_priv); + else if (INTEL_GEN(dev_priv) >= 8) + gen8_irq_reset(dev_priv); + else + ironlake_irq_reset(dev_priv); + } +} + +static void intel_irq_postinstall(struct drm_i915_private *dev_priv) +{ + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + cherryview_irq_postinstall(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_irq_postinstall(dev_priv); + else if (IS_GEN(dev_priv, 4)) + i965_irq_postinstall(dev_priv); + else if (IS_GEN(dev_priv, 3)) + i915_irq_postinstall(dev_priv); + else + i8xx_irq_postinstall(dev_priv); + } else { + if (INTEL_GEN(dev_priv) >= 11) + gen11_irq_postinstall(dev_priv); + else if (INTEL_GEN(dev_priv) >= 8) + gen8_irq_postinstall(dev_priv); + else + ironlake_irq_postinstall(dev_priv); + } +} + /** * intel_irq_install - enables the hardware interrupt * @dev_priv: i915 device instance @@ -4944,6 +4931,9 @@ void intel_irq_fini(struct drm_i915_private *i915) */ int intel_irq_install(struct drm_i915_private *dev_priv) { + int irq = dev_priv->drm.pdev->irq; + int ret; + /* * We enable some interrupt sources in our postinstall hooks, so mark * interrupts as enabled _before_ actually enabling them to avoid @@ -4951,7 +4941,20 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ dev_priv->runtime_pm.irqs_enabled = true; - return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); + dev_priv->drm.irq_enabled = true; + + intel_irq_reset(dev_priv); + + ret = request_irq(irq, intel_irq_handler(dev_priv), + IRQF_SHARED, DRIVER_NAME, dev_priv); + if (ret < 0) { + dev_priv->drm.irq_enabled = false; + return ret; + } + + intel_irq_postinstall(dev_priv); + + return ret; } /** @@ -4963,7 +4966,23 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { - drm_irq_uninstall(&dev_priv->drm); + int irq = dev_priv->drm.pdev->irq; + + /* + * FIXME we can get called twice during driver load + * error handling due to intel_modeset_cleanup() + * calling 
us out of sequence. Would be nice if + * it didn't do that... + */ + if (!dev_priv->drm.irq_enabled) + return; + + dev_priv->drm.irq_enabled = false; + + intel_irq_reset(dev_priv); + + free_irq(irq, dev_priv); + intel_hpd_cancel_work(dev_priv); dev_priv->runtime_pm.irqs_enabled = false; } @@ -4977,9 +4996,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) { - dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); + intel_irq_reset(dev_priv); dev_priv->runtime_pm.irqs_enabled = false; - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); } /** @@ -4992,6 +5011,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) { dev_priv->runtime_pm.irqs_enabled = true; - dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); - dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); + intel_irq_reset(dev_priv); + intel_irq_postinstall(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index cb25dd213308..8918809cd805 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -12,9 +12,10 @@ struct drm_i915_private; struct intel_crtc; +struct intel_guc; -extern void intel_irq_init(struct drm_i915_private *dev_priv); -extern void intel_irq_fini(struct drm_i915_private *dev_priv); +void intel_irq_init(struct drm_i915_private *dev_priv); +void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); @@ -77,8 +78,8 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); -void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); -void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); +void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask); +void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask); void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); @@ -102,16 +103,40 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) return dev_priv->runtime_pm.irqs_enabled; } +static inline void intel_synchronize_irq(struct drm_i915_private *i915) +{ + synchronize_irq(i915->drm.pdev->irq); +} + int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, u8 pipe_mask); -void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv); -void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv); -void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); -void gen11_reset_guc_interrupts(struct drm_i915_private *i915); -void gen11_enable_guc_interrupts(struct drm_i915_private *i915); -void gen11_disable_guc_interrupts(struct drm_i915_private *i915); +void gen9_reset_guc_interrupts(struct intel_guc *guc); +void gen9_enable_guc_interrupts(struct intel_guc *guc); +void gen9_disable_guc_interrupts(struct intel_guc *guc); +void gen11_reset_guc_interrupts(struct intel_guc *guc); +void gen11_enable_guc_interrupts(struct intel_guc 
*guc); +void gen11_disable_guc_interrupts(struct intel_guc *guc); + +bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); + +u32 i915_get_vblank_counter(struct drm_crtc *crtc); +u32 g4x_get_vblank_counter(struct drm_crtc *crtc); + +int i8xx_enable_vblank(struct drm_crtc *crtc); +int i945gm_enable_vblank(struct drm_crtc *crtc); +int i965_enable_vblank(struct drm_crtc *crtc); +int ilk_enable_vblank(struct drm_crtc *crtc); +int bdw_enable_vblank(struct drm_crtc *crtc); +void i8xx_disable_vblank(struct drm_crtc *crtc); +void i945gm_disable_vblank(struct drm_crtc *crtc); +void i965_disable_vblank(struct drm_crtc *crtc); +void ilk_disable_vblank(struct drm_crtc *crtc); +void bdw_disable_vblank(struct drm_crtc *crtc); #endif /* __I915_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 5b07766a1c26..296452f9efe4 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -169,8 +169,9 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); #endif -i915_param_named(enable_dpcd_backlight, bool, 0600, - "Enable support for DPCD backlight control (default:false)"); +i915_param_named(enable_dpcd_backlight, int, 0600, + "Enable support for DPCD backlight control" + "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)"); #if IS_ENABLED(CONFIG_DRM_I915_GVT) i915_param_named(enable_gvt, bool, 0400, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index a4770ce46bd2..d29ade3b7de6 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -64,6 +64,7 @@ struct drm_printer; param(int, reset, 2) \ param(unsigned int, inject_load_failure, 0) \ param(int, fastboot, -1) \ + param(int, enable_dpcd_backlight, 0) \ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ @@ -76,7 +77,6 @@ struct drm_printer; param(bool, verbose_state_checks, true) \ param(bool, nuclear_pageflip, false) \ param(bool, enable_dp_mst, true) \ - param(bool, enable_dpcd_backlight, false) \ param(bool, enable_gvt, false) #define MEMBER(T, member, ...) 
T member; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 6c9f46fc3e12..bd9211b3d76e 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -595,7 +595,7 @@ static const struct intel_device_info intel_cherryview_info = { GEN9_DEFAULT_PAGE_SIZES, \ .has_logical_ring_preemption = 1, \ .display.has_csr = 1, \ - .has_guc = 1, \ + .has_gt_uc = 1, \ .display.has_ipc = 1, \ .ddb_size = 896 @@ -647,7 +647,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ .has_logical_ring_preemption = 1, \ - .has_guc = 1, \ + .has_gt_uc = 1, \ .ppgtt_type = INTEL_PPGTT_FULL, \ .ppgtt_size = 48, \ .has_reset_engine = 1, \ @@ -761,10 +761,40 @@ static const struct intel_device_info intel_elkhartlake_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), .require_force_probe = 1, - .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0), + .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .ppgtt_size = 36, }; +#define GEN12_FEATURES \ + GEN11_FEATURES, \ + GEN(12), \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_D] = PIPE_D_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ + } + +static const struct intel_device_info intel_tigerlake_12_info = { + GEN12_FEATURES, + PLATFORM(INTEL_TIGERLAKE), + .num_pipes = 4, + .require_force_probe = 1, + .display.has_modular_fia = 1, + .engine_mask = + BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), +}; + #undef GEN #undef PLATFORM @@ -836,6 +866,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_CNL_IDS(&intel_cannonlake_info), INTEL_ICL_11_IDS(&intel_icelake_11_info), INTEL_EHL_IDS(&intel_elkhartlake_info), + INTEL_TGL_12_IDS(&intel_tigerlake_12_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -848,7 +879,7 @@ static void i915_pci_remove(struct pci_dev *pdev) if (!dev) /* driver load aborted, nothing to cleanup */ return; - i915_driver_unload(dev); + i915_driver_remove(dev); drm_dev_put(dev); pci_set_drvdata(pdev, NULL); @@ -923,11 +954,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; - err = i915_driver_load(pdev, ent); + err = i915_driver_probe(pdev, ent); if (err) return err; - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { i915_pci_remove(pdev); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index a700c5c3d167..988a4092164e 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -200,20 +200,20 @@ #include "gt/intel_lrc_reg.h" #include "i915_drv.h" -#include "i915_oa_hsw.h" -#include "i915_oa_bdw.h" -#include "i915_oa_chv.h" -#include "i915_oa_sklgt2.h" -#include "i915_oa_sklgt3.h" -#include "i915_oa_sklgt4.h" -#include "i915_oa_bxt.h" -#include "i915_oa_kblgt2.h" -#include "i915_oa_kblgt3.h" -#include "i915_oa_glk.h" -#include "i915_oa_cflgt2.h" -#include "i915_oa_cflgt3.h" -#include "i915_oa_cnl.h" -#include "i915_oa_icl.h" +#include "oa/i915_oa_hsw.h" 
+#include "oa/i915_oa_bdw.h" +#include "oa/i915_oa_chv.h" +#include "oa/i915_oa_sklgt2.h" +#include "oa/i915_oa_sklgt3.h" +#include "oa/i915_oa_sklgt4.h" +#include "oa/i915_oa_bxt.h" +#include "oa/i915_oa_kblgt2.h" +#include "oa/i915_oa_kblgt3.h" +#include "oa/i915_oa_glk.h" +#include "oa/i915_oa_cflgt2.h" +#include "oa/i915_oa_cflgt3.h" +#include "oa/i915_oa_cnl.h" +#include "oa/i915_oa_icl.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv, } } -static int hsw_enable_metric_set(struct i915_perf_stream *stream) +static void delay_after_mux(void) { - struct drm_i915_private *dev_priv = stream->dev_priv; - const struct i915_oa_config *oa_config = stream->oa_config; - - /* PRM: - * - * OA unit is using “crclk†for its functionality. When trunk - * level clock gating takes place, OA clock would be gated, - * unable to count the events from non-render clock domain. - * Render clock gating must be disabled when OA is enabled to - * count the events from non-render domain. Unit level clock - * gating for RCS should also be disabled. - */ - I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & - ~GEN7_DOP_CLOCK_GATE_ENABLE)); - I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | - GEN6_CSUNIT_CLOCK_GATE_DISABLE)); - - config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); - - /* It apparently takes a fairly long time for a new MUX + /* + * It apparently takes a fairly long time for a new MUX * configuration to be be applied after these register writes. * This delay duration was derived empirically based on the * render_basic config but hopefully it covers the maximum @@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream) * a delay at this location would mitigate any invalid reports. */ usleep_range(15000, 20000); +} + +static int hsw_enable_metric_set(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + const struct i915_oa_config *oa_config = stream->oa_config; + + /* + * PRM: + * + * OA unit is using “crclk†for its functionality. When trunk + * level clock gating takes place, OA clock would be gated, + * unable to count the events from non-render clock domain. + * Render clock gating must be disabled when OA is enabled to + * count the events from non-render domain. Unit level clock + * gating for RCS should also be disabled. + */ + I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & + ~GEN7_DOP_CLOCK_GATE_ENABLE)); + I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | + GEN6_CSUNIT_CLOCK_GATE_DISABLE)); + + config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); @@ -1628,6 +1634,27 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) ~GT_NOA_ENABLE)); } +static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, + i915_reg_t reg) +{ + u32 mmio = i915_mmio_reg_offset(reg); + int i; + + /* + * This arbitrary default will select the 'EU FPU0 Pipeline + * Active' event. In the future it's anticipated that there + * will be an explicit 'No Event' we can select, but not yet... 
+ */ + if (!oa_config) + return 0; + + for (i = 0; i < oa_config->flex_regs_len; i++) { + if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) + return oa_config->flex_regs[i].value; + } + + return 0; +} /* * NB: It must always remain pointer safe to run this even if the OA unit * has been disabled. @@ -1661,33 +1688,138 @@ gen8_update_reg_state_unlocked(struct intel_context *ce, GEN8_OA_COUNTER_RESUME); for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { - u32 state_offset = ctx_flexeu0 + i * 2; - u32 mmio = i915_mmio_reg_offset(flex_regs[i]); + CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i], + oa_config_flex_reg(oa_config, flex_regs[i])); + } - /* - * This arbitrary default will select the 'EU FPU0 Pipeline - * Active' event. In the future it's anticipated that there - * will be an explicit 'No Event' we can select, but not yet... - */ - u32 value = 0; + CTX_REG(reg_state, + CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, + intel_sseu_make_rpcs(i915, &ce->sseu)); +} + +struct flex { + i915_reg_t reg; + u32 offset; + u32 value; +}; - if (oa_config) { - u32 j; +static int +gen8_store_flex(struct i915_request *rq, + struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + u32 offset; + u32 *cs; - for (j = 0; j < oa_config->flex_regs_len; j++) { - if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { - value = oa_config->flex_regs[j].value; - break; - } - } - } + cs = intel_ring_begin(rq, 4 * count); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; + do { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = offset + (flex->offset + 1) * sizeof(u32); + *cs++ = 0; + *cs++ = flex->value; + } while (flex++, --count); + + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen8_load_flex(struct i915_request *rq, + struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + u32 *cs; + + GEM_BUG_ON(!count || count > 63); + + cs = intel_ring_begin(rq, 2 * count + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(count); + do { + *cs++ = i915_mmio_reg_offset(flex->reg); + *cs++ = flex->value; + } while (flex++, --count); + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} - CTX_REG(reg_state, state_offset, flex_regs[i], value); +static int gen8_modify_context(struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + struct i915_request *rq; + int err; + + lockdep_assert_held(&ce->pin_mutex); + + rq = i915_request_create(ce->engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + /* Serialise with the remote context */ + err = intel_context_prepare_remote_request(ce, rq); + if (err == 0) + err = gen8_store_flex(rq, ce, flex, count); + + i915_request_add(rq); + return err; +} + +static int gen8_modify_self(struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + struct i915_request *rq; + int err; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + err = gen8_load_flex(rq, ce, flex, count); + + i915_request_add(rq); + return err; +} + +static int gen8_configure_context(struct i915_gem_context *ctx, + struct flex *flex, unsigned int count) +{ + struct i915_gem_engines_iter it; + struct intel_context *ce; + int err = 0; + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + GEM_BUG_ON(ce == ce->engine->kernel_context); + + if (ce->engine->class != RENDER_CLASS) + continue; + + err = intel_context_lock_pinned(ce); + if (err) + break; + + 
flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); + + /* Otherwise OA settings will be set upon first use */ + if (intel_context_is_pinned(ce)) + err = gen8_modify_context(ce, flex, count); + + intel_context_unlock_pinned(ce); + if (err) + break; } + i915_gem_context_unlock_engines(ctx); - CTX_REG(reg_state, - CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - intel_sseu_make_rpcs(i915, &ce->sseu)); + return err; } /* @@ -1714,15 +1846,42 @@ gen8_update_reg_state_unlocked(struct intel_context *ce, * * Note: it's only the RCS/Render context that has any OA state. */ -static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, +static int gen8_configure_all_contexts(struct drm_i915_private *i915, const struct i915_oa_config *oa_config) { - unsigned int map_type = i915_coherent_map_type(dev_priv); + /* The MMIO offsets for Flex EU registers aren't contiguous */ + const u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset; +#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N)) + struct flex regs[] = { + { + GEN8_R_PWR_CLK_STATE, + CTX_R_PWR_CLK_STATE, + }, + { + GEN8_OACTXCONTROL, + i915->perf.oa.ctx_oactxctrl_offset, + ((i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME) + }, + { EU_PERF_CNTL0, ctx_flexeuN(0) }, + { EU_PERF_CNTL1, ctx_flexeuN(1) }, + { EU_PERF_CNTL2, ctx_flexeuN(2) }, + { EU_PERF_CNTL3, ctx_flexeuN(3) }, + { EU_PERF_CNTL4, ctx_flexeuN(4) }, + { EU_PERF_CNTL5, ctx_flexeuN(5) }, + { EU_PERF_CNTL6, ctx_flexeuN(6) }, + }; +#undef ctx_flexeuN + struct intel_engine_cs *engine; struct i915_gem_context *ctx; - struct i915_request *rq; - int ret; + enum intel_engine_id id; + int i; + + for (i = 2; i < ARRAY_SIZE(regs); i++) + regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); - lockdep_assert_held(&dev_priv->drm.struct_mutex); + lockdep_assert_held(&i915->drm.struct_mutex); /* * The OA register config is setup through the context image. This image @@ -1734,58 +1893,41 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, * this might leave small interval of time where the OA unit is * configured at an invalid sampling period. * - * So far the best way to work around this issue seems to be draining - * the GPU from any submitted work. + * Note that since we emit all requests from a single ring, there + * is still an implicit global barrier here that may cause a high + * priority context to wait for an otherwise independent low priority + * context. Contexts idle at the time of reconfiguration are not + * trapped behind the barrier. */ - ret = i915_gem_wait_for_idle(dev_priv, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - /* Update all contexts now that we've stalled the submission. 
*/ - list_for_each_entry(ctx, &dev_priv->contexts.list, link) { - struct i915_gem_engines_iter it; - struct intel_context *ce; - - for_each_gem_engine(ce, - i915_gem_context_lock_engines(ctx), - it) { - u32 *regs; - - if (ce->engine->class != RENDER_CLASS) - continue; - - /* OA settings will be set upon first use */ - if (!ce->state) - continue; - - regs = i915_gem_object_pin_map(ce->state->obj, - map_type); - if (IS_ERR(regs)) { - i915_gem_context_unlock_engines(ctx); - return PTR_ERR(regs); - } + list_for_each_entry(ctx, &i915->contexts.list, link) { + int err; - ce->state->obj->mm.dirty = true; - regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); - - gen8_update_reg_state_unlocked(ce, regs, oa_config); + if (ctx == i915->kernel_context) + continue; - i915_gem_object_unpin_map(ce->state->obj); - } - i915_gem_context_unlock_engines(ctx); + err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs)); + if (err) + return err; } /* - * Apply the configuration by doing one context restore of the edited - * context image. + * After updating all other contexts, we need to modify ourselves. + * If we don't modify the kernel_context, we do not get events while + * idle. */ - rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context); - if (IS_ERR(rq)) - return PTR_ERR(rq); + for_each_engine(engine, i915, id) { + struct intel_context *ce = engine->kernel_context; + int err; - i915_request_add(rq); + if (engine->class != RENDER_CLASS) + continue; + + regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); + + err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs)); + if (err) + return err; + } return 0; } @@ -1835,6 +1977,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) return ret; config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); @@ -2515,6 +2658,9 @@ static int i915_perf_release(struct inode *inode, struct file *file) i915_perf_destroy_locked(stream); mutex_unlock(&dev_priv->perf.lock); + /* Release the reference the perf stream kept on the driver. */ + drm_dev_put(&dev_priv->drm); + return 0; } @@ -2650,6 +2796,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, if (!(param->flags & I915_PERF_FLAG_DISABLED)) i915_perf_enable_locked(stream); + /* Take a reference on the driver that will be kept with stream_fd + * until its release. + */ + drm_dev_get(&dev_priv->drm); + return stream_fd; err_open: @@ -3477,9 +3628,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; - dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; - dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; - + if (IS_GEN(dev_priv, 10)) { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; + } else { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e; + } dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); } } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 8fe46ee920a0..eff86483bec0 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -102,10 +102,8 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) /* * Also there is software busyness tracking available we do not * need the timer for I915_SAMPLE_BUSY counter. - * - * Use RCS as proxy for all engines. 
*/ - else if (intel_engine_supports_stats(i915->engine[RCS0])) + else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS) enable &= ~BIT(I915_SAMPLE_BUSY); /* diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h index 49709de69875..b02dea17dcab 100644 --- a/drivers/gpu/drm/i915/i915_priolist_types.h +++ b/drivers/gpu/drm/i915/i915_priolist_types.h @@ -17,6 +17,16 @@ enum { I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, + /* + * Requests containing performance queries must not be preempted by + * another context. They get scheduled with their default priority and + * once they reach the execlist ports we ensure that they stick on the + * HW until finished by pretending that they have maximum priority, + * i.e. nothing can have higher priority and force us to usurp the + * active request. + */ + I915_PRIORITY_UNPREEMPTABLE = INT_MAX, + I915_PRIORITY_INVALID = INT_MIN }; diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index 969e514916ab..683e97ac2430 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -24,6 +24,8 @@ #ifndef _I915_PVINFO_H_ #define _I915_PVINFO_H_ +#include <linux/types.h> + /* The MMIO offset of the shared info between guest and host emulator */ #define VGT_PVINFO_PAGE 0x78000 #define VGT_PVINFO_SIZE 0x1000 @@ -110,8 +112,9 @@ struct vgt_if { u32 rsv7[0x200 - 24]; /* pad to one page */ } __packed; -#define vgtif_reg(x) \ - _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))) +#define vgtif_offset(x) (offsetof(struct vgt_if, x)) + +#define vgtif_reg(x) _MMIO(VGT_PVINFO_PAGE + vgtif_offset(x)) /* vGPU display status to be used by the host side */ #define VGT_DRV_DISPLAY_NOT_READY 0 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d6483b5dc8e5..d2b76121d863 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -242,6 +242,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) +#define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c)) /* * Device info offset array based helpers for groups of registers with unevenly @@ -1793,19 +1794,21 @@ enum i915_power_well_id { */ #define _ICL_COMBOPHY_A 0x162000 #define _ICL_COMBOPHY_B 0x6C000 -#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \ - _ICL_COMBOPHY_B) +#define _EHL_COMBOPHY_C 0x160000 +#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \ + _ICL_COMBOPHY_B, \ + _EHL_COMBOPHY_C) /* CNL/ICL Port CL_DW registers */ -#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ 4 * (dw)) #define CNL_PORT_CL1CM_DW5 _MMIO(0x162014) -#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port)) +#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy)) #define CL_POWER_DOWN_ENABLE (1 << 4) #define SUS_CLOCK_CONFIG (3 << 0) -#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port)) +#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy)) #define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) #define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 #define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) @@ -1820,23 +1823,23 @@ enum i915_power_well_id { #define PWR_DOWN_LN_MASK (0xf << 4) #define PWR_DOWN_LN_SHIFT 4 -#define 
ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port)) +#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) #define ICL_LANE_ENABLE_AUX (1 << 0) /* CNL/ICL Port COMP_DW registers */ #define _ICL_PORT_COMP 0x100 -#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_COMP + 4 * (dw)) #define CNL_PORT_COMP_DW0 _MMIO(0x162100) -#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port)) +#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy)) #define COMP_INIT (1 << 31) #define CNL_PORT_COMP_DW1 _MMIO(0x162104) -#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port)) +#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy)) #define CNL_PORT_COMP_DW3 _MMIO(0x16210c) -#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port)) +#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy)) #define PROCESS_INFO_DOT_0 (0 << 26) #define PROCESS_INFO_DOT_1 (1 << 26) #define PROCESS_INFO_DOT_4 (2 << 26) @@ -1848,14 +1851,14 @@ enum i915_power_well_id { #define VOLTAGE_INFO_MASK (3 << 24) #define VOLTAGE_INFO_SHIFT 24 -#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port)) +#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy)) #define IREFGEN (1 << 24) #define CNL_PORT_COMP_DW9 _MMIO(0x162124) -#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port)) +#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy)) #define CNL_PORT_COMP_DW10 _MMIO(0x162128) -#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port)) +#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy)) /* CNL/ICL Port PCS registers */ #define _CNL_PORT_PCS_DW1_GRP_AE 0x162304 @@ -1868,14 +1871,14 @@ enum i915_power_well_id { #define _CNL_PORT_PCS_DW1_LN0_C 0x162C04 #define _CNL_PORT_PCS_DW1_LN0_D 0x162E04 #define _CNL_PORT_PCS_DW1_LN0_F 0x162804 -#define CNL_PORT_PCS_DW1_GRP(port) _MMIO(_PICK(port, \ +#define CNL_PORT_PCS_DW1_GRP(phy) _MMIO(_PICK(phy, \ _CNL_PORT_PCS_DW1_GRP_AE, \ _CNL_PORT_PCS_DW1_GRP_B, \ _CNL_PORT_PCS_DW1_GRP_C, \ _CNL_PORT_PCS_DW1_GRP_D, \ _CNL_PORT_PCS_DW1_GRP_AE, \ _CNL_PORT_PCS_DW1_GRP_F)) -#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \ +#define CNL_PORT_PCS_DW1_LN0(phy) _MMIO(_PICK(phy, \ _CNL_PORT_PCS_DW1_LN0_AE, \ _CNL_PORT_PCS_DW1_LN0_B, \ _CNL_PORT_PCS_DW1_LN0_C, \ @@ -1886,16 +1889,18 @@ enum i915_power_well_id { #define _ICL_PORT_PCS_AUX 0x300 #define _ICL_PORT_PCS_GRP 0x600 #define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) -#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_PCS_AUX + 4 * (dw)) -#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_PCS_GRP + 4 * (dw)) -#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_PCS_LN(ln) + 4 * (dw)) -#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port)) -#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port)) -#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port)) +#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) +#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) +#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy)) #define COMMON_KEEPER_EN (1 << 26) +#define LATENCY_OPTIM_MASK (0x3 << 2) +#define 
LATENCY_OPTIM_VAL(x) ((x) << 2) /* CNL/ICL Port TX registers */ #define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340 @@ -1929,18 +1934,18 @@ enum i915_power_well_id { #define _ICL_PORT_TX_GRP 0x680 #define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) -#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_TX_AUX + 4 * (dw)) -#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_TX_GRP + 4 * (dw)) -#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_TX_LN(ln) + 4 * (dw)) #define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port)) #define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port)) -#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port)) -#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port)) -#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port)) +#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) +#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) +#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy)) #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) #define SWING_SEL_UPPER_MASK (1 << 15) #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) @@ -1957,10 +1962,10 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) -#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) -#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port)) -#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port)) -#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) +#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) +#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) +#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy)) +#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -1971,9 +1976,9 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port)) #define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port)) -#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port)) -#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port)) -#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port)) +#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) +#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) +#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy)) #define TX_TRAINING_EN (1 << 31) #define TAP2_DISABLE (1 << 30) #define TAP3_DISABLE (1 << 29) @@ -1984,13 +1989,17 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) -#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) -#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) -#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) -#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) +#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) 
+#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) +#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy)) +#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) +#define _ICL_DPHY_CHKN_REG 0x194 +#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) +#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) + #define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \ _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) @@ -2195,9 +2204,13 @@ enum i915_power_well_id { #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) #define FIA1_BASE 0x163000 +#define FIA2_BASE 0x16E000 +#define FIA3_BASE 0x16F000 +#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) +#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) /* ICL PHY DFLEX registers */ -#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0) +#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) #define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) #define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) #define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) @@ -2513,13 +2526,19 @@ enum i915_power_well_id { #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) -#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */ -#define RING_FORCE_TO_NONPRIV_RD (1 << 28) -#define RING_FORCE_TO_NONPRIV_WR (2 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ +#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28) #define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */ #define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0) #define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0) #define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0) +#define RING_FORCE_TO_NONPRIV_MASK_VALID \ + (RING_FORCE_TO_NONPRIV_RANGE_MASK \ + | RING_FORCE_TO_NONPRIV_ACCESS_MASK) #define RING_MAX_NONPRIV_SLOTS 12 #define GEN7_TLB_RD_ADDR _MMIO(0x4700) @@ -3246,8 +3265,10 @@ enum i915_power_well_id { #define GMBUS_PIN_10_TC2_ICP 10 #define GMBUS_PIN_11_TC3_ICP 11 #define GMBUS_PIN_12_TC4_ICP 12 +#define GMBUS_PIN_13_TC5_TGP 13 +#define GMBUS_PIN_14_TC6_TGP 14 -#define GMBUS_NUM_PINS 13 /* including 0 */ +#define GMBUS_NUM_PINS 15 /* including 0 */ #define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ #define GMBUS_SW_CLR_INT (1 << 31) #define GMBUS_SW_RDY (1 << 30) @@ -4209,6 +4230,7 @@ enum { #define TRANSCODER_B_OFFSET 0x61000 #define TRANSCODER_C_OFFSET 0x62000 #define CHV_TRANSCODER_C_OFFSET 0x63000 +#define TRANSCODER_D_OFFSET 0x63000 #define TRANSCODER_EDP_OFFSET 0x6f000 #define TRANSCODER_DSI0_OFFSET 0x6b000 #define TRANSCODER_DSI1_OFFSET 0x6b800 @@ -5755,6 +5777,7 @@ enum { #define PIPE_A_OFFSET 0x70000 #define PIPE_B_OFFSET 0x71000 #define PIPE_C_OFFSET 0x72000 +#define PIPE_D_OFFSET 0x73000 #define CHV_PIPE_C_OFFSET 0x74000 /* * There's actually no pipe EDP. 
Some pipe registers have @@ -6284,6 +6307,7 @@ enum { #define _DSPATILEOFF 0x701A4 /* 965+ only */ #define _DSPAOFFSET 0x701A4 /* HSW */ #define _DSPASURFLIVE 0x701AC +#define _DSPAGAMC 0x701E0 #define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR) #define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR) @@ -6295,6 +6319,7 @@ enum { #define DSPLINOFF(plane) DSPADDR(plane) #define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET) #define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE) +#define DSPGAMC(plane, i) _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */ /* CHV pipe B blender and primary plane */ #define _CHV_BLEND_A 0x60a00 @@ -6397,6 +6422,7 @@ enum { #define _DVSAKEYMAXVAL 0x721a0 #define _DVSATILEOFF 0x721a4 #define _DVSASURFLIVE 0x721ac +#define _DVSAGAMC_G4X 0x721e0 /* g4x */ #define _DVSASCALE 0x72204 #define DVS_SCALE_ENABLE (1 << 31) #define DVS_FILTER_MASK (3 << 29) @@ -6405,7 +6431,8 @@ enum { #define DVS_FILTER_SOFTENING (2 << 29) #define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ #define DVS_VERTICAL_OFFSET_ENABLE (1 << 27) -#define _DVSAGAMC 0x72300 +#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */ +#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */ #define _DVSBCNTR 0x73180 #define _DVSBLINOFF 0x73184 @@ -6418,8 +6445,10 @@ enum { #define _DVSBKEYMAXVAL 0x731a0 #define _DVSBTILEOFF 0x731a4 #define _DVSBSURFLIVE 0x731ac +#define _DVSBGAMC_G4X 0x731e0 /* g4x */ #define _DVSBSCALE 0x73204 -#define _DVSBGAMC 0x73300 +#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */ +#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */ #define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR) #define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) @@ -6433,6 +6462,9 @@ enum { #define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) #define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) #define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) +#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */ +#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */ +#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */ #define _SPRA_CTL 0x70280 #define SPRITE_ENABLE (1 << 31) @@ -6457,7 +6489,7 @@ enum { #define SPRITE_YUV_ORDER_VYUY (3 << 16) #define SPRITE_ROTATE_180 (1 << 15) #define SPRITE_TRICKLE_FEED_DISABLE (1 << 14) -#define SPRITE_INT_GAMMA_ENABLE (1 << 13) +#define SPRITE_INT_GAMMA_DISABLE (1 << 13) #define SPRITE_TILED (1 << 10) #define SPRITE_DEST_KEY (1 << 2) #define _SPRA_LINOFF 0x70284 @@ -6480,6 +6512,8 @@ enum { #define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ #define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27) #define _SPRA_GAMC 0x70400 +#define _SPRA_GAMC16 0x70440 +#define _SPRA_GAMC17 0x7044c #define _SPRB_CTL 0x71280 #define _SPRB_LINOFF 0x71284 @@ -6495,6 +6529,8 @@ enum { #define _SPRB_SURFLIVE 0x712ac #define _SPRB_SCALE 0x71304 #define _SPRB_GAMC 0x71400 +#define _SPRB_GAMC16 0x71440 +#define _SPRB_GAMC17 0x7144c #define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL) #define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) @@ -6508,7 +6544,9 @@ enum { #define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) #define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) #define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) -#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, 
_SPRB_GAMC) +#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */ +#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */ +#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */ #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) @@ -6551,7 +6589,7 @@ enum { #define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4) #define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */ #define SP_SH_COS(x) (x) /* u3.7 */ -#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4) +#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0) #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) #define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284) @@ -6566,10 +6604,12 @@ enum { #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) #define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0) #define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4) -#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) +#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0) +#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \ + _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ - _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) + _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b))) #define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR) #define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF) @@ -6584,7 +6624,7 @@ enum { #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) #define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0) #define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1) -#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) +#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */ /* * CHV pipe B sprite CSC @@ -7317,16 +7357,6 @@ enum { #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which))) #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which))) -#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31) -#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30) -#define GEN9_GUC_DISPLAY_EVENT (1 << 29) -#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28) -#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27) -#define GEN9_GUC_DB_RING_EVENT (1 << 26) -#define GEN9_GUC_DMA_DONE_EVENT (1 << 25) -#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24) -#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23) - #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 #define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! 
*/ @@ -7606,6 +7636,7 @@ enum { #define SKL_DFSM_PIPE_A_DISABLE (1 << 30) #define SKL_DFSM_PIPE_B_DISABLE (1 << 21) #define SKL_DFSM_PIPE_C_DISABLE (1 << 28) +#define TGL_DFSM_PIPE_D_DISABLE (1 << 22) #define SKL_DSSM _MMIO(0x51004) #define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31) @@ -7690,6 +7721,9 @@ enum { #define GEN7_L3SQCREG4 _MMIO(0xb034) #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27) +#define GEN11_SCRATCH2 _MMIO(0xb140) +#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19) + #define GEN8_L3SQCREG4 _MMIO(0xb118) #define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6) #define GEN8_LQSC_RO_PERF_DIS (1 << 27) @@ -9119,7 +9153,8 @@ enum { #define GLK_PW_CTL_IDX_DDI_A 1 #define SKL_PW_CTL_IDX_MISC_IO 0 -/* ICL - power wells */ +/* ICL/TGL - power wells */ +#define TGL_PW_CTL_IDX_PW_5 4 #define ICL_PW_CTL_IDX_PW_4 3 #define ICL_PW_CTL_IDX_PW_3 2 #define ICL_PW_CTL_IDX_PW_2 1 @@ -9128,13 +9163,25 @@ enum { #define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440) #define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444) #define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C) +#define TGL_PW_CTL_IDX_AUX_TBT6 14 +#define TGL_PW_CTL_IDX_AUX_TBT5 13 +#define TGL_PW_CTL_IDX_AUX_TBT4 12 #define ICL_PW_CTL_IDX_AUX_TBT4 11 +#define TGL_PW_CTL_IDX_AUX_TBT3 11 #define ICL_PW_CTL_IDX_AUX_TBT3 10 +#define TGL_PW_CTL_IDX_AUX_TBT2 10 #define ICL_PW_CTL_IDX_AUX_TBT2 9 +#define TGL_PW_CTL_IDX_AUX_TBT1 9 #define ICL_PW_CTL_IDX_AUX_TBT1 8 +#define TGL_PW_CTL_IDX_AUX_TC6 8 +#define TGL_PW_CTL_IDX_AUX_TC5 7 +#define TGL_PW_CTL_IDX_AUX_TC4 6 #define ICL_PW_CTL_IDX_AUX_F 5 +#define TGL_PW_CTL_IDX_AUX_TC3 5 #define ICL_PW_CTL_IDX_AUX_E 4 +#define TGL_PW_CTL_IDX_AUX_TC2 4 #define ICL_PW_CTL_IDX_AUX_D 3 +#define TGL_PW_CTL_IDX_AUX_TC1 3 #define ICL_PW_CTL_IDX_AUX_C 2 #define ICL_PW_CTL_IDX_AUX_B 1 #define ICL_PW_CTL_IDX_AUX_A 0 @@ -9142,9 +9189,15 @@ enum { #define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450) #define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454) #define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C) +#define TGL_PW_CTL_IDX_DDI_TC6 8 +#define TGL_PW_CTL_IDX_DDI_TC5 7 +#define TGL_PW_CTL_IDX_DDI_TC4 6 #define ICL_PW_CTL_IDX_DDI_F 5 +#define TGL_PW_CTL_IDX_DDI_TC3 5 #define ICL_PW_CTL_IDX_DDI_E 4 +#define TGL_PW_CTL_IDX_DDI_TC2 4 #define ICL_PW_CTL_IDX_DDI_D 3 +#define TGL_PW_CTL_IDX_DDI_TC1 3 #define ICL_PW_CTL_IDX_DDI_C 2 #define ICL_PW_CTL_IDX_DDI_B 1 #define ICL_PW_CTL_IDX_DDI_A 0 @@ -9197,9 +9250,11 @@ enum skl_power_gate { #define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) #define _ICL_AUX_ANAOVRD1_A 0x162398 #define _ICL_AUX_ANAOVRD1_B 0x6C398 +#define _TGL_AUX_ANAOVRD1_C 0x160398 #define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \ _ICL_AUX_ANAOVRD1_A, \ - _ICL_AUX_ANAOVRD1_B)) + _ICL_AUX_ANAOVRD1_B, \ + _TGL_AUX_ANAOVRD1_C)) #define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) #define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) @@ -9321,6 +9376,7 @@ enum skl_power_gate { #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 #define _TRANS_DDI_FUNC_CTL_C 0x62400 +#define _TRANS_DDI_FUNC_CTL_D 0x63400 #define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 #define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400 #define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00 @@ -9328,10 +9384,12 @@ enum skl_power_gate { #define TRANS_DDI_FUNC_ENABLE (1 << 31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define TRANS_DDI_PORT_MASK (7 << 28) #define TRANS_DDI_PORT_SHIFT 28 -#define TRANS_DDI_SELECT_PORT(x) ((x) << 28) -#define TRANS_DDI_PORT_NONE (0 << 28) +#define TGL_TRANS_DDI_PORT_SHIFT 27 +#define TRANS_DDI_PORT_MASK (7 << 
TRANS_DDI_PORT_SHIFT) +#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT) +#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT) +#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT) #define TRANS_DDI_MODE_SELECT_MASK (7 << 24) #define TRANS_DDI_MODE_SELECT_HDMI (0 << 24) #define TRANS_DDI_MODE_SELECT_DVI (1 << 24) @@ -9541,6 +9599,9 @@ enum skl_power_gate { /* For each transcoder, we need to select the corresponding port clock */ #define TRANS_CLK_SEL_DISABLED (0x0 << 29) #define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29) +#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28) +#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28) + #define CDCLK_FREQ _MMIO(0x46200) @@ -9672,17 +9733,22 @@ enum skl_power_gate { * CNL Clocks */ #define DPCLKA_CFGCR0 _MMIO(0x6C200) -#define DPCLKA_CFGCR0_ICL _MMIO(0x164280) #define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \ (port) + 10)) -#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) + 10)) -#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \ - 21 : (tc_port) + 12)) #define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \ (port) * 2) #define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) #define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) +#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24)) +#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \ + (tc_port) + 12 : \ + (tc_port) - PORT_TC4 + 21)) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + /* CNL PLL */ #define DPLL0_ENABLE 0x46010 #define DPLL1_ENABLE 0x46014 @@ -9887,6 +9953,7 @@ enum skl_power_gate { #define DPLL_CFGCR1_PDIV_7 (8 << 2) #define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0) #define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0) +#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0) #define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1) #define _ICL_DPLL0_CFGCR0 0x164000 @@ -9899,6 +9966,22 @@ enum skl_power_gate { #define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \ _ICL_DPLL1_CFGCR1) +#define _TGL_DPLL0_CFGCR0 0x164284 +#define _TGL_DPLL1_CFGCR0 0x16428C +/* TODO: add DPLL4 */ +#define _TGL_TBTPLL_CFGCR0 0x16429C +#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ + _TGL_DPLL1_CFGCR0, \ + _TGL_TBTPLL_CFGCR0) + +#define _TGL_DPLL0_CFGCR1 0x164288 +#define _TGL_DPLL1_CFGCR1 0x164290 +/* TODO: add DPLL4 */ +#define _TGL_TBTPLL_CFGCR1 0x1642A0 +#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ + _TGL_DPLL1_CFGCR1, \ + _TGL_TBTPLL_CFGCR1) + /* BXT display engine PLL */ #define BXT_DE_PLL_CTL _MMIO(0x6d000) #define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */ @@ -11145,6 +11228,7 @@ enum skl_power_gate { #define _ICL_PHY_MISC_B 0x64C04 #define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \ _ICL_PHY_MISC_B) +#define ICL_PHY_MISC_MUX_DDID (1 << 28) #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) /* Icelake Display Stream Compression Registers */ @@ -11454,17 +11538,18 @@ enum skl_power_gate { _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) -#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0) +#define PORT_TX_DFLEXDPSP(fia) 
_MMIO_FIA((fia), 0x008A0) +#define MODULAR_FIA_MASK (1 << 4) #define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) #define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) #define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) #define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) #define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) -#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890) +#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890) #define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) -#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894) +#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894) #define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a195a92d0105..8ac7d14ec8c9 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -119,6 +119,50 @@ const struct dma_fence_ops i915_fence_ops = { .release = i915_fence_release, }; +static void irq_execute_cb(struct irq_work *wrk) +{ + struct execute_cb *cb = container_of(wrk, typeof(*cb), work); + + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); +} + +static void irq_execute_cb_hook(struct irq_work *wrk) +{ + struct execute_cb *cb = container_of(wrk, typeof(*cb), work); + + cb->hook(container_of(cb->fence, struct i915_request, submit), + &cb->signal->fence); + i915_request_put(cb->signal); + + irq_execute_cb(wrk); +} + +static void __notify_execute_cb(struct i915_request *rq) +{ + struct execute_cb *cb; + + lockdep_assert_held(&rq->lock); + + if (list_empty(&rq->execute_cb)) + return; + + list_for_each_entry(cb, &rq->execute_cb, link) + irq_work_queue(&cb->work); + + /* + * XXX Rollback on __i915_request_unsubmit() + * + * In the future, perhaps when we have an active time-slicing scheduler, + * it will be interesting to unsubmit parallel execution and remove + * busywaits from the GPU until their master is restarted. This is + * quite hairy, we have to carefully rollback the fence and do a + * preempt-to-idle cycle on the target engine, all the while the + * master execute_cb may refire. + */ + INIT_LIST_HEAD(&rq->execute_cb); +} + static inline void i915_request_remove_from_client(struct i915_request *request) { @@ -232,6 +276,12 @@ static bool i915_request_retire(struct i915_request *rq) local_irq_disable(); + /* + * We only loosely track inflight requests across preemption, + * and so we may find ourselves attempting to retire a _completed_ + * request that we have removed from the HW and put back on a run + * queue. 
+ */ spin_lock(&rq->engine->active.lock); list_del(&rq->sched.link); spin_unlock(&rq->engine->active.lock); @@ -242,10 +292,15 @@ static bool i915_request_retire(struct i915_request *rq) dma_fence_signal_locked(&rq->fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) i915_request_cancel_breadcrumb(rq); - if (rq->waitboost) { + if (i915_request_has_waitboost(rq)) { GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); atomic_dec(&rq->i915->gt_pm.rps.num_waiters); } + if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { + set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); + __notify_execute_cb(rq); + } + GEM_BUG_ON(!list_empty(&rq->execute_cb)); spin_unlock(&rq->lock); local_irq_enable(); @@ -285,50 +340,6 @@ void i915_request_retire_upto(struct i915_request *rq) } while (i915_request_retire(tmp) && tmp != rq); } -static void irq_execute_cb(struct irq_work *wrk) -{ - struct execute_cb *cb = container_of(wrk, typeof(*cb), work); - - i915_sw_fence_complete(cb->fence); - kmem_cache_free(global.slab_execute_cbs, cb); -} - -static void irq_execute_cb_hook(struct irq_work *wrk) -{ - struct execute_cb *cb = container_of(wrk, typeof(*cb), work); - - cb->hook(container_of(cb->fence, struct i915_request, submit), - &cb->signal->fence); - i915_request_put(cb->signal); - - irq_execute_cb(wrk); -} - -static void __notify_execute_cb(struct i915_request *rq) -{ - struct execute_cb *cb; - - lockdep_assert_held(&rq->lock); - - if (list_empty(&rq->execute_cb)) - return; - - list_for_each_entry(cb, &rq->execute_cb, link) - irq_work_queue(&cb->work); - - /* - * XXX Rollback on __i915_request_unsubmit() - * - * In the future, perhaps when we have an active time-slicing scheduler, - * it will be interesting to unsubmit parallel execution and remove - * busywaits from the GPU until their master is restarted. This is - * quite hairy, we have to carefully rollback the fence and do a - * preempt-to-idle cycle on the target engine, all the while the - * master execute_cb may refire. - */ - INIT_LIST_HEAD(&rq->execute_cb); -} - static int __i915_request_await_execution(struct i915_request *rq, struct i915_request *signal, @@ -596,7 +607,7 @@ out: struct i915_request * __i915_request_create(struct intel_context *ce, gfp_t gfp) { - struct i915_timeline *tl = ce->ring->timeline; + struct intel_timeline *tl = ce->ring->timeline; struct i915_request *rq; u32 seqno; int ret; @@ -645,7 +656,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) } } - ret = i915_timeline_get_seqno(tl, rq, &seqno); + ret = intel_timeline_get_seqno(tl, rq, &seqno); if (ret) goto err_free; @@ -673,7 +684,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->file_priv = NULL; rq->batch = NULL; rq->capture_list = NULL; - rq->waitboost = false; + rq->flags = 0; rq->execution_mask = ALL_ENGINES; INIT_LIST_HEAD(&rq->active_list); @@ -764,7 +775,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal) return 0; signal = list_prev_entry(signal, ring_link); - if (i915_timeline_sync_is_later(rq->timeline, &signal->fence)) + if (intel_timeline_sync_is_later(rq->timeline, &signal->fence)) return 0; return i915_sw_fence_await_dma_fence(&rq->submit, @@ -818,7 +829,7 @@ emit_semaphore_wait(struct i915_request *to, return err; /* We need to pin the signaler's HWSP until we are finished reading. 
*/ - err = i915_timeline_read_hwsp(from, to, &hwsp_offset); + err = intel_timeline_read_hwsp(from, to, &hwsp_offset); if (err) return err; @@ -929,7 +940,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) /* Squash repeated waits to the same timelines */ if (fence->context != rq->i915->mm.unordered_timeline && - i915_timeline_sync_is_later(rq->timeline, fence)) + intel_timeline_sync_is_later(rq->timeline, fence)) continue; if (dma_fence_is_i915(fence)) @@ -943,7 +954,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) /* Record the latest fence used against each timeline */ if (fence->context != rq->i915->mm.unordered_timeline) - i915_timeline_sync_set(rq->timeline, fence); + intel_timeline_sync_set(rq->timeline, fence); } while (--nchild); return 0; @@ -1081,7 +1092,7 @@ void i915_request_skip(struct i915_request *rq, int error) static struct i915_request * __i915_request_add_to_timeline(struct i915_request *rq) { - struct i915_timeline *timeline = rq->timeline; + struct intel_timeline *timeline = rq->timeline; struct i915_request *prev; /* @@ -1390,8 +1401,7 @@ long i915_request_wait(struct i915_request *rq, * serialise wait/reset with an explicit lock, we do want * lockdep to detect potential dependency cycles. */ - mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map, - 0, 0, _THIS_IP_); + mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_); /* * Optimistic spin before touching IRQs. @@ -1447,8 +1457,10 @@ long i915_request_wait(struct i915_request *rq, for (;;) { set_current_state(state); - if (i915_request_completed(rq)) + if (i915_request_completed(rq)) { + dma_fence_signal(&rq->fence); break; + } if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; @@ -1467,7 +1479,7 @@ long i915_request_wait(struct i915_request *rq, dma_fence_remove_callback(&rq->fence, &wait.cb); out: - mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_); + mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_); trace_i915_request_wait_end(rq); return timeout; } diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index edbbdfec24ab..313df3c37158 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -28,6 +28,7 @@ #include <linux/dma-fence.h> #include <linux/lockdep.h> +#include "gt/intel_context_types.h" #include "gt/intel_engine_types.h" #include "i915_gem.h" @@ -40,8 +41,8 @@ struct drm_file; struct drm_i915_gem_object; struct i915_request; -struct i915_timeline; -struct i915_timeline_cacheline; +struct intel_timeline; +struct intel_timeline_cacheline; struct i915_capture_list { struct i915_capture_list *next; @@ -112,7 +113,7 @@ struct i915_request { struct intel_engine_cs *engine; struct intel_context *hw_context; struct intel_ring *ring; - struct i915_timeline *timeline; + struct intel_timeline *timeline; struct list_head signal_link; /* @@ -175,7 +176,7 @@ struct i915_request { * inside the timeline's HWSP vma, but it is only valid while this * request has not completed and guarded by the timeline mutex. */ - struct i915_timeline_cacheline *hwsp_cacheline; + struct intel_timeline_cacheline *hwsp_cacheline; /** Position in the ring of the start of the request */ u32 head; @@ -215,7 +216,9 @@ struct i915_request { /** Time at which this request was emitted, in jiffies. 
*/ unsigned long emitted_jiffies; - bool waitboost; + unsigned long flags; +#define I915_REQUEST_WAITBOOST BIT(0) +#define I915_REQUEST_NOPREEMPT BIT(1) /** timeline->request entry for this request */ struct list_head link; @@ -429,6 +432,17 @@ static inline void i915_request_mark_complete(struct i915_request *rq) rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */ } +static inline bool i915_request_has_waitboost(const struct i915_request *rq) +{ + return rq->flags & I915_REQUEST_WAITBOOST; +} + +static inline bool i915_request_has_nopreempt(const struct i915_request *rq) +{ + /* Preemption should only be disabled very rarely */ + return unlikely(rq->flags & I915_REQUEST_NOPREEMPT); +} + bool i915_retire_requests(struct drm_i915_private *i915); #endif /* I915_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 2e9b38bdc33c..0bd452e851d8 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -179,8 +179,7 @@ static inline int rq_prio(const struct i915_request *rq) static void kick_submission(struct intel_engine_cs *engine, int prio) { - const struct i915_request *inflight = - port_request(engine->execlists.port); + const struct i915_request *inflight = *engine->execlists.active; /* * If we are already the currently executing context, don't @@ -395,6 +394,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, list_add(&dep->wait_link, &signal->waiters_list); list_add(&dep->signal_link, &node->signalers_list); dep->signaler = signal; + dep->waiter = node; dep->flags = flags; /* Keep track of whether anyone on this chain has a semaphore */ diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h index 3e309631bd0b..aad81acba9dc 100644 --- a/drivers/gpu/drm/i915/i915_scheduler_types.h +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -62,6 +62,7 @@ struct i915_sched_node { struct i915_dependency { struct i915_sched_node *signaler; + struct i915_sched_node *waiter; struct list_head signal_link; struct list_head wait_link; struct list_head dfs_link; diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index 207e21b478f2..acdf6eb9e262 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -66,12 +66,37 @@ struct i915_subtest { const char *name; }; +int __i915_nop_setup(void *data); +int __i915_nop_teardown(int err, void *data); + +int __i915_live_setup(void *data); +int __i915_live_teardown(int err, void *data); + +int __intel_gt_live_setup(void *data); +int __intel_gt_live_teardown(int err, void *data); + int __i915_subtests(const char *caller, + int (*setup)(void *data), + int (*teardown)(int err, void *data), const struct i915_subtest *st, unsigned int count, void *data); #define i915_subtests(T, data) \ - __i915_subtests(__func__, T, ARRAY_SIZE(T), data) + __i915_subtests(__func__, \ + __i915_nop_setup, __i915_nop_teardown, \ + T, ARRAY_SIZE(T), data) +#define i915_live_subtests(T, data) ({ \ + typecheck(struct drm_i915_private *, data); \ + __i915_subtests(__func__, \ + __i915_live_setup, __i915_live_teardown, \ + T, ARRAY_SIZE(T), data); \ +}) +#define intel_gt_live_subtests(T, data) ({ \ + typecheck(struct intel_gt *, data); \ + __i915_subtests(__func__, \ + __intel_gt_live_setup, __intel_gt_live_teardown, \ + T, ARRAY_SIZE(T), data); \ +}) #define SUBTEST(x) { x, #x } diff --git a/drivers/gpu/drm/i915/i915_timeline.h 
b/drivers/gpu/drm/i915/i915_timeline.h deleted file mode 100644 index 36e5e5a65155..000000000000 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef I915_TIMELINE_H -#define I915_TIMELINE_H - -#include <linux/lockdep.h> - -#include "i915_active.h" -#include "i915_syncmap.h" -#include "i915_timeline_types.h" - -int i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *tl, - struct i915_vma *hwsp); -void i915_timeline_fini(struct i915_timeline *tl); - -struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, - struct i915_vma *global_hwsp); - -static inline struct i915_timeline * -i915_timeline_get(struct i915_timeline *timeline) -{ - kref_get(&timeline->kref); - return timeline; -} - -void __i915_timeline_free(struct kref *kref); -static inline void i915_timeline_put(struct i915_timeline *timeline) -{ - kref_put(&timeline->kref, __i915_timeline_free); -} - -static inline int __i915_timeline_sync_set(struct i915_timeline *tl, - u64 context, u32 seqno) -{ - return i915_syncmap_set(&tl->sync, context, seqno); -} - -static inline int i915_timeline_sync_set(struct i915_timeline *tl, - const struct dma_fence *fence) -{ - return __i915_timeline_sync_set(tl, fence->context, fence->seqno); -} - -static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl, - u64 context, u32 seqno) -{ - return i915_syncmap_is_later(&tl->sync, context, seqno); -} - -static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl, - const struct dma_fence *fence) -{ - return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno); -} - -int i915_timeline_pin(struct i915_timeline *tl); -int i915_timeline_get_seqno(struct i915_timeline *tl, - struct i915_request *rq, - u32 *seqno); -void i915_timeline_unpin(struct i915_timeline *tl); - -int i915_timeline_read_hwsp(struct i915_request *from, - struct i915_request *until, - u32 *hwsp_offset); - -void i915_timelines_init(struct drm_i915_private *i915); -void i915_timelines_park(struct drm_i915_private *i915); -void i915_timelines_fini(struct drm_i915_private *i915); - -#endif diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f4ce643b3bc3..da18b8d6b80c 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -21,24 +21,22 @@ /* watermark/fifo 
updates */ TRACE_EVENT(intel_pipe_enable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) __array(u32, scanline, 3) __field(enum pipe, pipe) ), - TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable, ); TRACE_EVENT(intel_pipe_disable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) @@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable, ), TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); ), @@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun, ), TP_fast_assign( + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pipe %c, frame=%u, scanline=%u", @@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun, TP_fast_assign( enum pipe pipe = pch_transcoder; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pch transcoder %c, frame=%u, scanline=%u", @@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr, ), TP_fast_assign( - enum pipe pipe; - for_each_pipe(dev_priv, pipe) { - __entry->frame[pipe] = - 
dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline[pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + struct intel_crtc *crtc; + for_each_intel_crtc(&dev_priv->drm, crtc) { + __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); + __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); } __entry->old = old; __entry->new = new; @@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; @@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->level = wm->level; __entry->cxsr = wm->cxsr; @@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->sprite0_start = sprite0_start; __entry->sprite1_start = sprite1_start; @@ -300,25 +293,24 @@ TRACE_EVENT(intel_update_plane, TP_STRUCT__entry( __field(enum pipe, pipe) - __field(const char *, name) __field(u32, frame) __field(u32, scanline) __array(int, src, 4) __array(int, dst, 4) + __string(name, plane->name) ), TP_fast_assign( + __assign_str(name, plane->name); __entry->pipe = crtc->pipe; - __entry->name = plane->name; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); ), TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - pipe_name(__entry->pipe), __entry->name, + pipe_name(__entry->pipe), __get_str(name), __entry->frame, __entry->scanline, DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) @@ -330,21 +322,20 @@ TRACE_EVENT(intel_disable_plane, TP_STRUCT__entry( __field(enum pipe, pipe) - __field(const char *, name) __field(u32, frame) __field(u32, scanline) + __string(name, plane->name) ), TP_fast_assign( + __assign_str(name, plane->name); __entry->pipe = crtc->pipe; - __entry->name = plane->name; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->name, + pipe_name(__entry->pipe), __get_str(name), __entry->frame, __entry->scanline) ); @@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = 
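/*
 * Editorial sketch, not part of this patch: with the tracepoints above now
 * taking the CRTC instead of a (dev_priv, pipe) pair, a hypothetical call
 * site simply passes the crtc, and the event samples frame/scanline via
 * intel_crtc_get_vblank_counter() itself. Function name is invented.
 */
static void example_enable_pipe(struct intel_crtc *crtc)
{
	trace_intel_pipe_enable(crtc);	/* was trace_intel_pipe_enable(dev_priv, pipe) */

	/* ... actual pipe enable sequence ... */
}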
crtc->debug.min_vbl; __entry->max = crtc->debug.max_vbl; diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 2987219a6300..4920ff9aba62 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -131,6 +131,18 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size) ((typeof(ptr))((unsigned long)(ptr) | __bits)); \ }) +#define ptr_count_dec(p_ptr) do { \ + typeof(p_ptr) __p = (p_ptr); \ + unsigned long __v = (unsigned long)(*__p); \ + *__p = (typeof(*p_ptr))(--__v); \ +} while (0) + +#define ptr_count_inc(p_ptr) do { \ + typeof(p_ptr) __p = (p_ptr); \ + unsigned long __v = (unsigned long)(*__p); \ + *__p = (typeof(*p_ptr))(++__v); \ +} while (0) + #define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT) #define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT) #define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT) diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 94d3992b599d..dbd1fa3c7d90 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -52,34 +52,53 @@ */ /** - * i915_check_vgpu - detect virtual GPU + * i915_detect_vgpu - detect virtual GPU * @dev_priv: i915 device private * * This function is called at the initialization stage, to detect whether * running on a vGPU. */ -void i915_check_vgpu(struct drm_i915_private *dev_priv) +void i915_detect_vgpu(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct pci_dev *pdev = dev_priv->drm.pdev; u64 magic; u16 version_major; + void __iomem *shared_area; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - magic = __raw_uncore_read64(uncore, vgtif_reg(magic)); - if (magic != VGT_MAGIC) + /* + * This is called before we setup the main MMIO BAR mappings used via + * the uncore structure, so we need to access the BAR directly. Since + * we do not support VGT on older gens, return early so we don't have + * to consider differently numbered or sized MMIO bars + */ + if (INTEL_GEN(dev_priv) < 6) + return; + + shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE); + if (!shared_area) { + DRM_ERROR("failed to map MMIO bar to check for VGT\n"); return; + } + + magic = readq(shared_area + vgtif_offset(magic)); + if (magic != VGT_MAGIC) + goto out; - version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major)); + version_major = readw(shared_area + vgtif_offset(version_major)); if (version_major < VGT_VERSION_MAJOR) { DRM_INFO("VGT interface version mismatch!\n"); - return; + goto out; } - dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps)); + dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps)); dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); + +out: + pci_iounmap(pdev, shared_area); } bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv) @@ -112,22 +131,22 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt, /** * intel_vgt_deballoon - deballoon reserved graphics address trunks - * @dev_priv: i915 device private data + * @ggtt: the global GGTT from which we reserved earlier * * This function is called to deallocate the ballooned-out graphic memory, when * driver is unloaded or when ballooning fails. 
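/*
 * Editorial sketch, not part of this patch: the new ptr_count_inc()/
 * ptr_count_dec() helpers above treat a pointer-sized slot as a plain
 * counter. Variable and function names below are invented for illustration.
 */
static void example_ptr_counter(void)
{
	void *slot = NULL;	/* pointer slot reused as a counter */

	ptr_count_inc(&slot);	/* slot now encodes 1 */
	ptr_count_inc(&slot);	/* slot now encodes 2 */
	ptr_count_dec(&slot);	/* back to 1 */
}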
*/ -void intel_vgt_deballoon(struct drm_i915_private *dev_priv) +void intel_vgt_deballoon(struct i915_ggtt *ggtt) { int i; - if (!intel_vgpu_active(dev_priv)) + if (!intel_vgpu_active(ggtt->vm.i915)) return; DRM_DEBUG("VGT deballoon.\n"); for (i = 0; i < 4; i++) - vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]); + vgt_deballoon_space(ggtt, &bl_info.space[i]); } static int vgt_balloon_space(struct i915_ggtt *ggtt, @@ -153,7 +172,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, /** * intel_vgt_balloon - balloon out reserved graphics address trunks - * @dev_priv: i915 device private data + * @ggtt: the global GGTT from which to reserve * * This function is called at the initialization stage, to balloon out the * graphic address space allocated to other vGPUs, by marking these spaces as @@ -195,22 +214,26 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, * Returns: * zero on success, non-zero if configuration invalid or ballooning failed */ -int intel_vgt_balloon(struct drm_i915_private *dev_priv) +int intel_vgt_balloon(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct intel_uncore *uncore = &ggtt->vm.i915->uncore; unsigned long ggtt_end = ggtt->vm.total; unsigned long mappable_base, mappable_size, mappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end; int ret; - if (!intel_vgpu_active(dev_priv)) + if (!intel_vgpu_active(ggtt->vm.i915)) return 0; - mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); - mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); - unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); - unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size)); + mappable_base = + intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base)); + mappable_size = + intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size)); + unmappable_base = + intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base)); + unmappable_size = + intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size)); mappable_end = mappable_base + mappable_size; unmappable_end = unmappable_base + unmappable_size; diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index ebe1b7bced98..8b3663dad193 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -24,9 +24,10 @@ #ifndef _I915_VGPU_H_ #define _I915_VGPU_H_ +#include "i915_drv.h" #include "i915_pvinfo.h" -void i915_check_vgpu(struct drm_i915_private *dev_priv); +void i915_detect_vgpu(struct drm_i915_private *dev_priv); bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv); @@ -42,7 +43,7 @@ intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv) return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT; } -int intel_vgt_balloon(struct drm_i915_private *dev_priv); -void intel_vgt_deballoon(struct drm_i915_private *dev_priv); +int intel_vgt_balloon(struct i915_ggtt *ggtt); +void intel_vgt_deballoon(struct i915_ggtt *ggtt); #endif /* _I915_VGPU_H_ */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a57729be8312..eb16a1a93bbc 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -22,11 +22,13 @@ * */ +#include <linux/sched/mm.h> #include <drm/drm_gem.h> #include "display/intel_frontbuffer.h" #include "gt/intel_engine.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_globals.h" @@ -77,43 +79,20 @@ static void vma_print_allocator(struct i915_vma *vma, const 
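/*
 * Editorial sketch, not part of this patch: intel_vgt_balloon() and
 * intel_vgt_deballoon() are now keyed on the GGTT they reserve from, so a
 * hypothetical caller passes &i915->ggtt rather than the device itself.
 * Function names are invented and error handling is trimmed.
 */
static int example_ggtt_init(struct drm_i915_private *i915)
{
	return intel_vgt_balloon(&i915->ggtt);	/* was intel_vgt_balloon(i915) */
}

static void example_ggtt_fini(struct drm_i915_private *i915)
{
	intel_vgt_deballoon(&i915->ggtt);	/* was intel_vgt_deballoon(i915) */
}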
char *reason) #endif -static void obj_bump_mru(struct drm_i915_gem_object *obj) +static inline struct i915_vma *active_to_vma(struct i915_active *ref) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - list_move_tail(&obj->mm.link, &i915->mm.shrink_list); - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + return container_of(ref, typeof(struct i915_vma), active); +} - obj->mm.dirty = true; /* be paranoid */ +static int __i915_vma_active(struct i915_active *ref) +{ + i915_vma_get(active_to_vma(ref)); + return 0; } static void __i915_vma_retire(struct i915_active *ref) { - struct i915_vma *vma = container_of(ref, typeof(*vma), active); - struct drm_i915_gem_object *obj = vma->obj; - - GEM_BUG_ON(!i915_gem_object_is_active(obj)); - if (--obj->active_count) - return; - - /* Prune the shared fence arrays iff completely idle (inc. external) */ - if (reservation_object_trylock(obj->base.resv)) { - if (reservation_object_test_signaled_rcu(obj->base.resv, true)) - reservation_object_add_excl_fence(obj->base.resv, NULL); - reservation_object_unlock(obj->base.resv); - } - - /* - * Bump our place on the bound list to keep it roughly in LRU order - * so that we don't steal from recently used but inactive objects - * (unless we are forced to ofc!) - */ - if (i915_gem_object_is_shrinkable(obj)) - obj_bump_mru(obj); - - i915_gem_object_put(obj); /* and drop the active reference */ + i915_vma_put(active_to_vma(ref)); } static struct i915_vma * @@ -125,7 +104,7 @@ vma_create(struct drm_i915_gem_object *obj, struct rb_node *rb, **p; /* The aliasing_ppgtt should never be used directly! */ - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); + GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); vma = i915_vma_alloc(); if (vma == NULL) @@ -138,9 +117,17 @@ vma_create(struct drm_i915_gem_object *obj, vma->size = obj->base.size; vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - i915_active_init(vm->i915, &vma->active, __i915_vma_retire); + i915_active_init(vm->i915, &vma->active, + __i915_vma_active, __i915_vma_retire); INIT_ACTIVE_REQUEST(&vma->last_fence); + /* Declare ourselves safe for use inside shrinkers */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + fs_reclaim_acquire(GFP_KERNEL); + might_lock(&vma->active.mutex); + fs_reclaim_release(GFP_KERNEL); + } + INIT_LIST_HEAD(&vma->closed_link); if (view && view->type != I915_GGTT_VIEW_NORMAL) { @@ -408,7 +395,7 @@ void i915_vma_flush_writes(struct i915_vma *vma) if (!i915_vma_has_ggtt_write(vma)) return; - i915_gem_flush_ggtt_writes(vma->vm->i915); + intel_gt_flush_ggtt_writes(vma->vm->gt); i915_vma_unset_ggtt_write(vma); } @@ -921,6 +908,7 @@ int i915_vma_move_to_active(struct i915_vma *vma, unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; + int err; assert_vma_held(vma); assert_object_held(obj); @@ -934,17 +922,9 @@ int i915_vma_move_to_active(struct i915_vma *vma, * add the active reference first and queue for it to be dropped * *last*. 
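/*
 * Editorial note, not part of this patch: with the per-vma i915_active
 * callbacks above, the reference counting that used to live in
 * __i915_vma_retire() via obj->active_count collapses into a get/put pair
 * bracketing the vma's active period, roughly:
 *
 *   i915_vma_move_to_active(vma, rq, flags)
 *     -> i915_active_ref(&vma->active, ...)   first user triggers
 *        __i915_vma_active(), which takes an extra vma reference
 *   ... request executes and retires ...
 *     -> last user gone, __i915_vma_retire() drops that reference
 */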
*/ - if (!vma->active.count && !obj->active_count++) - i915_gem_object_get(obj); /* once more for the active ref */ - - if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) { - if (!vma->active.count && !--obj->active_count) - i915_gem_object_put(obj); - return -ENOMEM; - } - - GEM_BUG_ON(!i915_vma_is_active(vma)); - GEM_BUG_ON(!obj->active_count); + err = i915_active_ref(&vma->active, rq->fence.context, rq); + if (unlikely(err)) + return err; obj->write_domain = 0; if (flags & EXEC_OBJECT_WRITE) { @@ -956,11 +936,14 @@ int i915_vma_move_to_active(struct i915_vma *vma, obj->read_domains = 0; } obj->read_domains |= I915_GEM_GPU_DOMAINS; + obj->mm.dirty = true; if (flags & EXEC_OBJECT_NEEDS_FENCE) __i915_active_request_set(&vma->last_fence, rq); export_fence(vma, rq, flags); + + GEM_BUG_ON(!i915_vma_is_active(vma)); return 0; } diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 7135d8dc32a7..f99c9fd497b2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -58,6 +58,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(CANNONLAKE), PLATFORM_NAME(ICELAKE), PLATFORM_NAME(ELKHARTLAKE), + PLATFORM_NAME(TIGERLAKE), }; #undef PLATFORM_NAME @@ -929,35 +930,28 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) } } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { u32 dfsm = I915_READ(SKL_DFSM); - u8 disabled_mask = 0; - bool invalid; - int num_bits; + u8 enabled_mask = BIT(info->num_pipes) - 1; if (dfsm & SKL_DFSM_PIPE_A_DISABLE) - disabled_mask |= BIT(PIPE_A); + enabled_mask &= ~BIT(PIPE_A); if (dfsm & SKL_DFSM_PIPE_B_DISABLE) - disabled_mask |= BIT(PIPE_B); + enabled_mask &= ~BIT(PIPE_B); if (dfsm & SKL_DFSM_PIPE_C_DISABLE) - disabled_mask |= BIT(PIPE_C); - - num_bits = hweight8(disabled_mask); - - switch (disabled_mask) { - case BIT(PIPE_A): - case BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_C): - invalid = true; - break; - default: - invalid = false; - } + enabled_mask &= ~BIT(PIPE_C); + if (INTEL_GEN(dev_priv) >= 12 && + (dfsm & TGL_DFSM_PIPE_D_DISABLE)) + enabled_mask &= ~BIT(PIPE_D); - if (num_bits > info->num_pipes || invalid) - DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", - disabled_mask); + /* + * At least one pipe should be enabled and if there are + * disabled pipes, they should be the last ones, with no holes + * in the mask. 
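/*
 * Editorial example, not part of this patch: the "no holes" rule stated
 * above is enforced just below by checking that enabled_mask + 1 is a
 * power of two, e.g.
 *
 *   enabled_mask = 0b0111 -> 0b0111 + 1 = 0b1000, a power of two -> valid,
 *                            num_pipes becomes hweight8(0b0111) = 3
 *   enabled_mask = 0b0101 -> 0b0101 + 1 = 0b0110, not a power of two
 *                            -> reported as an invalid fuse configuration
 */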
+ */ + if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1)) + DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n", + enabled_mask); else - info->num_pipes -= num_bits; + info->num_pipes = hweight8(enabled_mask); } /* Initialize slice/subslice/EU info */ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index ddafc819bf30..4f58e8d71b67 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -78,6 +78,8 @@ enum intel_platform { /* gen11 */ INTEL_ICELAKE, INTEL_ELKHARTLAKE, + /* gen12 */ + INTEL_TIGERLAKE, INTEL_MAX_PLATFORMS }; @@ -110,7 +112,7 @@ enum intel_ppgtt_type { func(gpu_reset_clobbers_display); \ func(has_reset_engine); \ func(has_fpga_dbg); \ - func(has_guc); \ + func(has_gt_uc); \ func(has_l3_dpf); \ func(has_llc); \ func(has_logical_ring_contexts); \ @@ -136,6 +138,7 @@ enum intel_ppgtt_type { func(has_gmch); \ func(has_hotplug); \ func(has_ipc); \ + func(has_modular_fia); \ func(has_overlay); \ func(has_psr); \ func(overlay_needs_physical); \ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1d58f7ec5d84..c4016164c34e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -101,20 +101,30 @@ struct intel_fbdev { struct mutex hpd_lock; }; +enum intel_hotplug_state { + INTEL_HOTPLUG_UNCHANGED, + INTEL_HOTPLUG_CHANGED, + INTEL_HOTPLUG_RETRY, +}; + struct intel_encoder { struct drm_encoder base; enum intel_output_type type; enum port port; unsigned int cloneable; - bool (*hotplug)(struct intel_encoder *encoder, - struct intel_connector *connector); + enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received); enum intel_output_type (*compute_output_type)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); int (*compute_config)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); + void (*update_prepare)(struct intel_atomic_state *, + struct intel_encoder *, + struct intel_crtc *); void (*pre_pll_enable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); @@ -124,6 +134,9 @@ struct intel_encoder { void (*enable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); + void (*update_complete)(struct intel_atomic_state *, + struct intel_encoder *, + struct intel_crtc *); void (*disable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); @@ -812,6 +825,15 @@ struct intel_crtc_state { /* Actual register state of the dpll, for shared dpll cross-checking. */ struct intel_dpll_hw_state dpll_hw_state; + /* + * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by + * setting shared_dpll and dpll_hw_state to one of these reserved ones. + */ + struct icl_port_dpll { + struct intel_shared_dpll *pll; + struct intel_dpll_hw_state hw_state; + } icl_port_dplls[ICL_PORT_DPLL_COUNT]; + /* DSI PLL registers */ struct { u32 ctrl, div; @@ -1224,8 +1246,13 @@ struct intel_digital_port { /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. 
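/*
 * Editorial sketch, not part of this patch: an encoder's ->hotplug() hook
 * now reports one of the intel_hotplug_state values instead of a bool and
 * learns whether a live IRQ triggered the check. The helper
 * example_connector_detect() is invented purely for illustration.
 */
static enum intel_hotplug_state
example_hotplug(struct intel_encoder *encoder,
		struct intel_connector *connector,
		bool irq_received)
{
	if (!irq_received)
		return INTEL_HOTPLUG_UNCHANGED;

	if (example_connector_detect(connector))
		return INTEL_HOTPLUG_CHANGED;

	/* inconclusive: ask the hotplug work to probe this connector again */
	return INTEL_HOTPLUG_RETRY;
}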
*/ enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; + struct mutex tc_lock; /* protects the TypeC port mode */ + intel_wakeref_t tc_lock_wakeref; + int tc_link_refcount; bool tc_legacy_port:1; - enum tc_port_type tc_type; + char tc_port_name[8]; + enum tc_port_mode tc_mode; + enum phy_fia tc_phy_fia; void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -1473,8 +1500,8 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); void intel_encoder_destroy(struct drm_encoder *encoder); struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder); -bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port); -bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); +bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy); +bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy); enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port); int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c deleted file mode 100644 index 72cdafd9636a..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Vinit Azad <vinit.azad@intel.com> - * Ben Widawsky <ben@bwidawsk.net> - * Dave Gordon <david.s.gordon@intel.com> - * Alex Dai <yu.dai@intel.com> - */ - -#include "intel_guc_fw.h" -#include "i915_drv.h" - -#define __MAKE_GUC_FW_PATH(KEY) \ - "i915/" \ - __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \ - __stringify(KEY##_GUC_FW_MAJOR) "." \ - __stringify(KEY##_GUC_FW_MINOR) "." 
\ - __stringify(KEY##_GUC_FW_PATCH) ".bin" - -#define SKL_GUC_FW_PREFIX skl -#define SKL_GUC_FW_MAJOR 32 -#define SKL_GUC_FW_MINOR 0 -#define SKL_GUC_FW_PATCH 3 -#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL) -MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH); - -#define BXT_GUC_FW_PREFIX bxt -#define BXT_GUC_FW_MAJOR 32 -#define BXT_GUC_FW_MINOR 0 -#define BXT_GUC_FW_PATCH 3 -#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT) -MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH); - -#define KBL_GUC_FW_PREFIX kbl -#define KBL_GUC_FW_MAJOR 32 -#define KBL_GUC_FW_MINOR 0 -#define KBL_GUC_FW_PATCH 3 -#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL) -MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); - -#define GLK_GUC_FW_PREFIX glk -#define GLK_GUC_FW_MAJOR 32 -#define GLK_GUC_FW_MINOR 0 -#define GLK_GUC_FW_PATCH 3 -#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK) -MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH); - -#define ICL_GUC_FW_PREFIX icl -#define ICL_GUC_FW_MAJOR 32 -#define ICL_GUC_FW_MINOR 0 -#define ICL_GUC_FW_PATCH 3 -#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL) -MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH); - -static void guc_fw_select(struct intel_uc_fw *guc_fw) -{ - struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *i915 = guc_to_i915(guc); - - GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - - if (!HAS_GUC(i915)) - return; - - if (i915_modparams.guc_firmware_path) { - guc_fw->path = i915_modparams.guc_firmware_path; - guc_fw->major_ver_wanted = 0; - guc_fw->minor_ver_wanted = 0; - } else if (IS_ICELAKE(i915)) { - guc_fw->path = ICL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR; - } else if (IS_GEMINILAKE(i915)) { - guc_fw->path = GLK_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR; - } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { - guc_fw->path = KBL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR; - } else if (IS_BROXTON(i915)) { - guc_fw->path = BXT_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR; - } else if (IS_SKYLAKE(i915)) { - guc_fw->path = SKL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR; - } -} - -/** - * intel_guc_fw_init_early() - initializes GuC firmware struct - * @guc: intel_guc struct - * - * On platforms with GuC selects firmware for uploading - */ -void intel_guc_fw_init_early(struct intel_guc *guc) -{ - struct intel_uc_fw *guc_fw = &guc->fw; - - intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC); - guc_fw_select(guc_fw); -} - -static void guc_prepare_xfer(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - /* Must program this register before loading the ucode with DMA */ - I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES | - GUC_ENABLE_READ_CACHE_LOGIC | - GUC_ENABLE_MIA_CACHING | - GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | - GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | - GUC_ENABLE_MIA_CLOCK_GATING); - - if (IS_GEN9_LP(dev_priv)) - I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - else - I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - - if (IS_GEN(dev_priv, 9)) { - /* DOP Clock Gating Enable for GuC clocks */ - I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE | - I915_READ(GEN7_MISCCPCTL))); - - /* allows for 5us (in 10ns units) before GT can go to 
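/*
 * Editorial expansion, not part of this patch: for reference, the deleted
 * __MAKE_GUC_FW_PATH() helper above pastes the per-platform tokens into a
 * firmware file name. Expanding it by hand for the SKL defines gives:
 *
 *   __MAKE_GUC_FW_PATH(SKL)
 *     -> "i915/" "skl" "_guc_" "32" "." "0" "." "3" ".bin"
 *     -> "i915/skl_guc_32.0.3.bin"
 */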
RC6 */ - I915_WRITE(GUC_ARAT_C6DIS, 0x1FF); - } -} - -/* Copy RSA signature from the fw image to HW for verification */ -static void guc_xfer_rsa(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uc_fw *fw = &guc->fw; - struct sg_table *pages = fw->obj->mm.pages; - u32 rsa[UOS_RSA_SCRATCH_COUNT]; - int i; - - sg_pcopy_to_buffer(pages->sgl, pages->nents, - rsa, sizeof(rsa), fw->rsa_offset); - - for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) - I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); -} - -static bool guc_xfer_completed(struct intel_guc *guc, u32 *status) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - /* Did we complete the xfer? */ - *status = I915_READ(DMA_CTRL); - return !(*status & START_DMA); -} - -/* - * Read the GuC status register (GUC_STATUS) and store it in the - * specified location; then return a boolean indicating whether - * the value matches either of two values representing completion - * of the GuC boot process. - * - * This is used for polling the GuC status in a wait_for() - * loop below. - */ -static inline bool guc_ready(struct intel_guc *guc, u32 *status) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 val = I915_READ(GUC_STATUS); - u32 uk_val = val & GS_UKERNEL_MASK; - - *status = val; - return (uk_val == GS_UKERNEL_READY) || - ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE)); -} - -static int guc_wait_ucode(struct intel_guc *guc) -{ - u32 status; - int ret; - - /* - * Wait for the GuC to start up. - * NB: Docs recommend not using the interrupt for completion. - * Measurements indicate this should take no more than 20ms, so a - * timeout here indicates that the GuC has failed and is unusable. - * (Higher levels of the driver may decide to reset the GuC and - * attempt the ucode load again if this happens.) - */ - ret = wait_for(guc_ready(guc, &status), 100); - DRM_DEBUG_DRIVER("GuC status %#x\n", status); - - if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { - DRM_ERROR("GuC firmware signature verification failed\n"); - ret = -ENOEXEC; - } - - if (ret == 0 && !guc_xfer_completed(guc, &status)) { - DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", - status); - ret = -ENXIO; - } - - return ret; -} - -/* - * Transfer the firmware image to RAM for execution by the microcontroller. - * - * Architecturally, the DMA engine is bidirectional, and can potentially even - * transfer between GTT locations. This functionality is left out of the API - * for now as there is no need for it. - */ -static int guc_xfer_ucode(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uc_fw *guc_fw = &guc->fw; - unsigned long offset; - - /* - * The header plus uCode will be copied to WOPCM via DMA, excluding any - * other components - */ - I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); - - /* Set the source address for the new blob */ - offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset; - I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); - I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); - - /* - * Set the DMA destination. Current uCode expects the code to be - * loaded at 8k; locations below this are used for the stack. 
- */ - I915_WRITE(DMA_ADDR_1_LOW, 0x2000); - I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); - - /* Finally start the DMA */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); - - return guc_wait_ucode(guc); -} -/* - * Load the GuC firmware blob into the MinuteIA. - */ -static int guc_fw_xfer(struct intel_uc_fw *guc_fw) -{ - struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *dev_priv = guc_to_i915(guc); - int ret; - - GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - guc_prepare_xfer(guc); - - /* - * Note that GuC needs the CSS header plus uKernel code to be copied - * by the DMA engine in one operation, whereas the RSA signature is - * loaded via MMIO. - */ - guc_xfer_rsa(guc); - - ret = guc_xfer_ucode(guc); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - - return ret; -} - -/** - * intel_guc_fw_upload() - load GuC uCode to device - * @guc: intel_guc structure - * - * Called from intel_uc_init_hw() during driver load, resume from sleep and - * after a GPU reset. - * - * The firmware image should have already been fetched into memory, so only - * check that fetch succeeded, and then transfer the image to the h/w. - * - * Return: non-zero code on error - */ -int intel_guc_fw_upload(struct intel_guc *guc) -{ - return intel_uc_fw_upload(&guc->fw, guc_fw_xfer); -} diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 1d7d26e4cf14..c66b2d8a6219 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; if (!i915_modparams.enable_gvt) { @@ -122,13 +122,14 @@ bail: } /** - * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading + * intel_gvt_driver_remove - cleanup GVT components when i915 driver is + * unbinding * @dev_priv: drm i915 private * * * This function is called at the i915 driver unloading stage, to shutdown * GVT components and release the related resources. 
*/ -void intel_gvt_cleanup(struct drm_i915_private *dev_priv) +void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { if (!intel_gvt_active(dev_priv)) return; diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index 61b246470282..502fad8a8652 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -24,11 +24,11 @@ #ifndef _INTEL_GVT_H_ #define _INTEL_GVT_H_ -struct intel_gvt; +struct drm_i915_private; #ifdef CONFIG_DRM_I915_GVT int intel_gvt_init(struct drm_i915_private *dev_priv); -void intel_gvt_cleanup(struct drm_i915_private *dev_priv); +void intel_gvt_driver_remove(struct drm_i915_private *dev_priv); int intel_gvt_init_device(struct drm_i915_private *dev_priv); void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); @@ -38,7 +38,8 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { return 0; } -static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv) + +static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { } diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c deleted file mode 100644 index 05cbf8338f53..000000000000 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ /dev/null @@ -1,215 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2014-2018 Intel Corporation - */ - -#include "intel_huc_fw.h" -#include "i915_drv.h" - -/** - * DOC: HuC Firmware - * - * Motivation: - * GEN9 introduces a new dedicated firmware for usage in media HEVC (High - * Efficiency Video Coding) operations. Userspace can use the firmware - * capabilities by adding HuC specific commands to batch buffers. - * - * Implementation: - * The same firmware loader is used as the GuC. However, the actual - * loading to HW is deferred until GEM initialization is done. - * - * Note that HuC firmware loading must be done before GuC loading. 
- */ - -#define BXT_HUC_FW_MAJOR 01 -#define BXT_HUC_FW_MINOR 8 -#define BXT_BLD_NUM 2893 - -#define SKL_HUC_FW_MAJOR 01 -#define SKL_HUC_FW_MINOR 07 -#define SKL_BLD_NUM 1398 - -#define KBL_HUC_FW_MAJOR 02 -#define KBL_HUC_FW_MINOR 00 -#define KBL_BLD_NUM 1810 - -#define GLK_HUC_FW_MAJOR 03 -#define GLK_HUC_FW_MINOR 01 -#define GLK_BLD_NUM 2893 - -#define ICL_HUC_FW_MAJOR 8 -#define ICL_HUC_FW_MINOR 4 -#define ICL_BLD_NUM 3238 - -#define HUC_FW_PATH(platform, major, minor, bld_num) \ - "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ - __stringify(minor) "_" __stringify(bld_num) ".bin" - -#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \ - SKL_HUC_FW_MINOR, SKL_BLD_NUM) -MODULE_FIRMWARE(I915_SKL_HUC_UCODE); - -#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \ - BXT_HUC_FW_MINOR, BXT_BLD_NUM) -MODULE_FIRMWARE(I915_BXT_HUC_UCODE); - -#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \ - KBL_HUC_FW_MINOR, KBL_BLD_NUM) -MODULE_FIRMWARE(I915_KBL_HUC_UCODE); - -#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ - GLK_HUC_FW_MINOR, GLK_BLD_NUM) -MODULE_FIRMWARE(I915_GLK_HUC_UCODE); - -#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \ - ICL_HUC_FW_MINOR, ICL_BLD_NUM) -MODULE_FIRMWARE(I915_ICL_HUC_UCODE); - -static void huc_fw_select(struct intel_uc_fw *huc_fw) -{ - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - struct drm_i915_private *dev_priv = huc_to_i915(huc); - - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - - if (!HAS_HUC(dev_priv)) - return; - - if (i915_modparams.huc_firmware_path) { - huc_fw->path = i915_modparams.huc_firmware_path; - huc_fw->major_ver_wanted = 0; - huc_fw->minor_ver_wanted = 0; - } else if (IS_SKYLAKE(dev_priv)) { - huc_fw->path = I915_SKL_HUC_UCODE; - huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR; - } else if (IS_BROXTON(dev_priv)) { - huc_fw->path = I915_BXT_HUC_UCODE; - huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR; - } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { - huc_fw->path = I915_KBL_HUC_UCODE; - huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - huc_fw->path = I915_GLK_HUC_UCODE; - huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR; - } else if (IS_ICELAKE(dev_priv)) { - huc_fw->path = I915_ICL_HUC_UCODE; - huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR; - } -} - -/** - * intel_huc_fw_init_early() - initializes HuC firmware struct - * @huc: intel_huc struct - * - * On platforms with HuC selects firmware for uploading - */ -void intel_huc_fw_init_early(struct intel_huc *huc) -{ - struct intel_uc_fw *huc_fw = &huc->fw; - - intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC); - huc_fw_select(huc_fw); -} - -static void huc_xfer_rsa(struct intel_huc *huc) -{ - struct intel_uc_fw *fw = &huc->fw; - struct sg_table *pages = fw->obj->mm.pages; - - /* - * HuC firmware image is outside GuC accessible range. 
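/*
 * Editorial expansion, not part of this patch: the deleted HUC_FW_PATH()
 * macro above builds the HuC firmware file name the same way. For the SKL
 * defines it expands to:
 *
 *   HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, SKL_HUC_FW_MINOR, SKL_BLD_NUM)
 *     -> "i915/" "skl" "_huc_ver" "01" "_" "07" "_" "1398" ".bin"
 *     -> "i915/skl_huc_ver01_07_1398.bin"
 */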
- * Copy the RSA signature out of the image into - * the perma-pinned region set aside for it - */ - sg_pcopy_to_buffer(pages->sgl, pages->nents, - huc->rsa_data_vaddr, fw->rsa_size, - fw->rsa_offset); -} - -static int huc_xfer_ucode(struct intel_huc *huc) -{ - struct intel_uc_fw *huc_fw = &huc->fw; - struct drm_i915_private *dev_priv = huc_to_i915(huc); - struct intel_uncore *uncore = &dev_priv->uncore; - unsigned long offset = 0; - u32 size; - int ret; - - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - - intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - - /* Set the source address for the uCode */ - offset = intel_uc_fw_ggtt_offset(huc_fw) + - huc_fw->header_offset; - intel_uncore_write(uncore, DMA_ADDR_0_LOW, - lower_32_bits(offset)); - intel_uncore_write(uncore, DMA_ADDR_0_HIGH, - upper_32_bits(offset) & 0xFFFF); - - /* - * Hardware doesn't look at destination address for HuC. Set it to 0, - * but still program the correct address space. - */ - intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0); - intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); - - size = huc_fw->header_size + huc_fw->ucode_size; - intel_uncore_write(uncore, DMA_COPY_SIZE, size); - - /* Start the DMA */ - intel_uncore_write(uncore, DMA_CTRL, - _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); - - /* Wait for DMA to finish */ - ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); - - DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); - - /* Disable the bits once DMA is over */ - intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); - - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - - return ret; -} - -/** - * huc_fw_xfer() - DMA's the firmware - * @huc_fw: the firmware descriptor - * - * Transfer the firmware image to RAM for execution by the microcontroller. - * - * Return: 0 on success, non-zero on failure - */ -static int huc_fw_xfer(struct intel_uc_fw *huc_fw) -{ - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - - huc_xfer_rsa(huc); - - return huc_xfer_ucode(huc); -} - -/** - * intel_huc_fw_upload() - load HuC uCode to device - * @huc: intel_huc structure - * - * Called from intel_uc_init_hw() during driver load, resume from sleep and - * after a GPU reset. Note that HuC must be loaded before GuC. - * - * The firmware image should have already been fetched into memory, so only - * check that fetch succeeded, and then transfer the image to the h/w. 
- * - * Return: non-zero code on error - */ -int intel_huc_fw_upload(struct intel_huc *huc) -{ - return intel_uc_fw_upload(&huc->fw, huc_fw_xfer); -} diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d9a7a13ce32a..30399b245f07 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1116,6 +1116,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; + cpp = plane_state->base.fb->format->cpp[0]; + /* * Not 100% sure which way ELK should go here as the * spec only says CL/CTG should assume 32bpp and BW @@ -1129,9 +1131,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, */ if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY && level != G4X_WM_LEVEL_NORMAL) - cpp = 4; - else - cpp = plane_state->base.fb->format->cpp[0]; + cpp = max(cpp, 4u); clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; @@ -1198,8 +1198,8 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, return dirty; } -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 pri_val); static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, @@ -1566,13 +1566,13 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); if (!crtc_state->wm.need_postvbl_update) return; mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; + crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; g4x_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -2185,13 +2185,13 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); if (!crtc_state->wm.need_postvbl_update) return; mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; + crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; vlv_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -2493,8 +2493,8 @@ struct ilk_wm_maximums { * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. 
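/*
 * Editorial example, not part of this patch: the g4x_compute_wm() change
 * earlier in this hunk replaces the hard "cpp = 4" override with
 * "cpp = max(cpp, 4u)", which only changes behaviour for formats wider
 * than 32bpp:
 *
 *   16bpp framebuffer, cpp = 2: max(2, 4) = 4 -> same result as before
 *   64bpp framebuffer, cpp = 8: max(8, 4) = 8 -> no longer clamped to 4
 */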
*/ -static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 mem_value, bool is_lp) { u32 method1, method2; @@ -2503,19 +2503,19 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, if (mem_value == 0) return U32_MAX; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value); + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); if (!is_lp) return method1; - method2 = ilk_wm_method2(cstate->pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->base.dst), + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->base.adjusted_mode.crtc_htotal, + drm_rect_width(&plane_state->base.dst), cpp, mem_value); return min(method1, method2); @@ -2525,8 +2525,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 mem_value) { u32 method1, method2; @@ -2535,15 +2535,15 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, if (mem_value == 0) return U32_MAX; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value); - method2 = ilk_wm_method2(cstate->pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->base.dst), + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->base.adjusted_mode.crtc_htotal, + drm_rect_width(&plane_state->base.dst), cpp, mem_value); return min(method1, method2); } @@ -2552,8 +2552,8 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 mem_value) { int cpp; @@ -2561,29 +2561,29 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate, if (mem_value == 0) return U32_MAX; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - return ilk_wm_method2(cstate->pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, - pstate->base.crtc_w, cpp, mem_value); + return ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->base.adjusted_mode.crtc_htotal, + plane_state->base.crtc_w, cpp, mem_value); } /* Only for WM_LP. 
*/ -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 pri_val) { int cpp; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp); + return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp); } static unsigned int @@ -2752,7 +2752,7 @@ static bool ilk_validate_wm_level(int level, static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_crtc *intel_crtc, int level, - struct intel_crtc_state *cstate, + struct intel_crtc_state *crtc_state, const struct intel_plane_state *pristate, const struct intel_plane_state *sprstate, const struct intel_plane_state *curstate, @@ -2770,30 +2770,30 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, } if (pristate) { - result->pri_val = ilk_compute_pri_wm(cstate, pristate, + result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, pri_latency, level); - result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val); + result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); } if (sprstate) - result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency); + result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); if (curstate) - result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency); + result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); result->enable = true; } static u32 -hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) +hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state) { const struct intel_atomic_state *intel_state = - to_intel_atomic_state(cstate->base.state); + to_intel_atomic_state(crtc_state->base.state); const struct drm_display_mode *adjusted_mode = - &cstate->base.adjusted_mode; + &crtc_state->base.adjusted_mode; u32 linetime, ips_linetime; - if (!cstate->base.active) + if (!crtc_state->base.active) return 0; if (WARN_ON(adjusted_mode->crtc_clock == 0)) return 0; @@ -3101,10 +3101,10 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, } /* Compute new watermarks for the pipe */ -static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) +static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) { - struct drm_atomic_state *state = cstate->base.state; - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_atomic_state *state = crtc_state->base.state; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_pipe_wm *pipe_wm; struct drm_device *dev = state->dev; const struct drm_i915_private *dev_priv = to_i915(dev); @@ -3116,9 +3116,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) int level, max_level = ilk_wm_max_level(dev_priv), usable_level; struct ilk_wm_maximums max; - pipe_wm = &cstate->wm.ilk.optimal; + pipe_wm = &crtc_state->wm.ilk.optimal; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) { + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) { const struct intel_plane_state *ps = to_intel_plane_state(plane_state); if (plane->type == DRM_PLANE_TYPE_PRIMARY) @@ -3129,7 +3129,7 @@ static int 
ilk_compute_pipe_wm(struct intel_crtc_state *cstate) curstate = ps; } - pipe_wm->pipe_enabled = cstate->base.active; + pipe_wm->pipe_enabled = crtc_state->base.active; if (sprstate) { pipe_wm->sprites_enabled = sprstate->base.visible; pipe_wm->sprites_scaled = sprstate->base.visible && @@ -3148,11 +3148,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) usable_level = 0; memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); - ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, + ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state, pristate, sprstate, curstate, &pipe_wm->wm[0]); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - pipe_wm->linetime = hsw_compute_linetime_wm(cstate); + pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state); if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) return -EINVAL; @@ -3162,7 +3162,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) for (level = 1; level <= usable_level; level++) { struct intel_wm_level *wm = &pipe_wm->wm[level]; - ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, + ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state, pristate, sprstate, curstate, wm); /* @@ -3736,14 +3736,13 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) return 0; } -bool intel_can_enable_sagv(struct drm_atomic_state *state) +bool intel_can_enable_sagv(struct intel_atomic_state *state) { - struct drm_device *dev = state->dev; + struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct intel_crtc *crtc; struct intel_plane *plane; - struct intel_crtc_state *cstate; + struct intel_crtc_state *crtc_state; enum pipe pipe; int level, latency; int sagv_block_time_us; @@ -3761,27 +3760,27 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) /* * If there are no active CRTCs, no additional checks need be performed */ - if (hweight32(intel_state->active_crtcs) == 0) + if (hweight32(state->active_crtcs) == 0) return true; /* * SKL+ workaround: bspec recommends we disable SAGV when we have * more then one pipe enabled */ - if (hweight32(intel_state->active_crtcs) > 1) + if (hweight32(state->active_crtcs) > 1) return false; /* Since we're now guaranteed to only have one active CRTC... 
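/*
 * Editorial example, not part of this patch: with exactly one active CRTC
 * guaranteed at this point, ffs() recovers its pipe from the bitmask, e.g.
 * active_crtcs == BIT(PIPE_C) == 0b100 gives ffs(0b100) - 1 = 2 == PIPE_C.
 */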
*/ - pipe = ffs(intel_state->active_crtcs) - 1; + pipe = ffs(state->active_crtcs) - 1; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - cstate = to_intel_crtc_state(crtc->base.state); + crtc_state = to_intel_crtc_state(crtc->base.state); if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) return false; for_each_intel_plane_on_crtc(dev, crtc, plane) { struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane->id]; + &crtc_state->wm.skl.optimal.planes[plane->id]; /* Skip this plane if it's not enabled */ if (!wm->wm[0].plane_en) @@ -3812,7 +3811,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) } static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *cstate, + const struct intel_crtc_state *crtc_state, const u64 total_data_rate, const int num_active, struct skl_ddb_allocation *ddb) @@ -3826,7 +3825,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) < 11) return ddb_size - 4; /* 4 blocks for bypass path allocation */ - adjusted_mode = &cstate->base.adjusted_mode; + adjusted_mode = &crtc_state->base.adjusted_mode; total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode); /* @@ -3849,23 +3848,22 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, static void skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *cstate, + const struct intel_crtc_state *crtc_state, const u64 total_data_rate, struct skl_ddb_allocation *ddb, struct skl_ddb_entry *alloc, /* out */ int *num_active /* out */) { - struct drm_atomic_state *state = cstate->base.state; + struct drm_atomic_state *state = crtc_state->base.state; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *for_crtc = cstate->base.crtc; - const struct drm_crtc_state *crtc_state; - const struct drm_crtc *crtc; + struct drm_crtc *for_crtc = crtc_state->base.crtc; + const struct intel_crtc *crtc; u32 pipe_width = 0, total_width = 0, width_before_pipe = 0; enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe; u16 ddb_size; u32 i; - if (WARN_ON(!state) || !cstate->base.active) { + if (WARN_ON(!state) || !crtc_state->base.active) { alloc->start = 0; alloc->end = 0; *num_active = hweight32(dev_priv->active_crtcs); @@ -3877,7 +3875,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, else *num_active = hweight32(dev_priv->active_crtcs); - ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate, + ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, *num_active, ddb); /* @@ -3902,16 +3900,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, * framebuffer, So instead of allocating DDB equally among pipes * distribute DDB based on resolution/width of the display. 
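/*
 * Editorial illustration, not part of this patch, with invented numbers:
 * splitting the DDB by width rather than evenly means that with pipe A
 * driving a 1920-wide mode and pipe B a 3840-wide mode (5760 total), pipe B
 * receives roughly 3840/5760 = 2/3 of the available blocks and pipe A the
 * remaining third, instead of each pipe getting half.
 */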
*/ - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { - const struct drm_display_mode *adjusted_mode; + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + enum pipe pipe = crtc->pipe; int hdisplay, vdisplay; - enum pipe pipe; - if (!crtc_state->enable) + if (!crtc_state->base.enable) continue; - pipe = to_intel_crtc(crtc)->pipe; - adjusted_mode = &crtc_state->adjusted_mode; drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); total_width += hdisplay; @@ -3930,7 +3927,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane); -static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, +static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, int level, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, @@ -4062,15 +4059,15 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, * Caller should take care of dividing & rounding off the value. */ static uint_fixed_16_16_t -skl_plane_downscale_amount(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate) +skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(pstate->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); u32 src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; - if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) + if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) return u32_to_fixed16(0); /* n.b., src is 16.16 fixed point, dst is whole integer */ @@ -4079,20 +4076,20 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, * Cursors only support 0/180 degree rotation, * hence no need to account for rotation here. */ - src_w = pstate->base.src_w >> 16; - src_h = pstate->base.src_h >> 16; - dst_w = pstate->base.crtc_w; - dst_h = pstate->base.crtc_h; + src_w = plane_state->base.src_w >> 16; + src_h = plane_state->base.src_h >> 16; + dst_w = plane_state->base.crtc_w; + dst_h = plane_state->base.crtc_h; } else { /* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. 
*/ - src_w = drm_rect_width(&pstate->base.src) >> 16; - src_h = drm_rect_height(&pstate->base.src) >> 16; - dst_w = drm_rect_width(&pstate->base.dst); - dst_h = drm_rect_height(&pstate->base.dst); + src_w = drm_rect_width(&plane_state->base.src) >> 16; + src_h = drm_rect_height(&plane_state->base.src) >> 16; + dst_w = drm_rect_width(&plane_state->base.dst); + dst_h = drm_rect_height(&plane_state->base.dst); } fp_w_ratio = div_fixed16(src_w, dst_w); @@ -4137,49 +4134,46 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) } int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct drm_crtc_state *crtc_state = &cstate->base; - struct drm_atomic_state *state = crtc_state->state; + struct drm_atomic_state *state = crtc_state->base.state; struct drm_plane *plane; - const struct drm_plane_state *pstate; - struct intel_plane_state *intel_pstate; + const struct drm_plane_state *drm_plane_state; int crtc_clock, dotclk; u32 pipe_max_pixel_rate; uint_fixed_16_16_t pipe_downscale; uint_fixed_16_16_t max_downscale = u32_to_fixed16(1); - if (!cstate->base.enable) + if (!crtc_state->base.enable) return 0; - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { uint_fixed_16_16_t plane_downscale; uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8); int bpp; + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); - if (!intel_wm_plane_visible(cstate, - to_intel_plane_state(pstate))) + if (!intel_wm_plane_visible(crtc_state, plane_state)) continue; - if (WARN_ON(!pstate->fb)) + if (WARN_ON(!plane_state->base.fb)) return -EINVAL; - intel_pstate = to_intel_plane_state(pstate); - plane_downscale = skl_plane_downscale_amount(cstate, - intel_pstate); - bpp = pstate->fb->format->cpp[0] * 8; + plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state); + bpp = plane_state->base.fb->format->cpp[0] * 8; if (bpp == 64) plane_downscale = mul_fixed16(plane_downscale, fp_9_div_8); max_downscale = max_fixed16(plane_downscale, max_downscale); } - pipe_downscale = skl_pipe_downscale_amount(cstate); + pipe_downscale = skl_pipe_downscale_amount(crtc_state); pipe_downscale = mul_fixed16(pipe_downscale, max_downscale); - crtc_clock = crtc_state->adjusted_mode.crtc_clock; + crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk; if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) @@ -4196,12 +4190,11 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, } static u64 -skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, +skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, const int plane) { - struct intel_plane *intel_plane = - to_intel_plane(intel_pstate->base.plane); + struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); u32 data_rate; u32 width = 0, height = 0; struct drm_framebuffer *fb; @@ -4209,10 +4202,10 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, uint_fixed_16_16_t down_scale_amount; u64 rate; - if (!intel_pstate->base.visible) + if (!plane_state->base.visible) return 0; - fb = intel_pstate->base.fb; + fb = plane_state->base.fb; format = 
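/*
 * Editorial example, not part of this patch: a quick check of the 16.16
 * fixed-point math in skl_plane_downscale_amount() for a 3840x2160 source
 * scanned out as a 1920x1080 plane:
 *
 *   fp_w_ratio = div_fixed16(3840, 1920) = 2.0  (0x00020000)
 *   fp_h_ratio = div_fixed16(2160, 1080) = 2.0  (0x00020000)
 *   downscale  = mul_fixed16(2.0, 2.0)   = 4.0  (0x00040000)
 */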
fb->format->format; if (intel_plane->id == PLANE_CURSOR) @@ -4225,8 +4218,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ - width = drm_rect_width(&intel_pstate->base.src) >> 16; - height = drm_rect_height(&intel_pstate->base.src) >> 16; + width = drm_rect_width(&plane_state->base.src) >> 16; + height = drm_rect_height(&plane_state->base.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ if (plane == 1 && is_planar_yuv_format(format)) { @@ -4236,7 +4229,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, data_rate = width * height; - down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); + down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state); rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); @@ -4245,35 +4238,32 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, } static u64 -skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, +skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate, u64 *uv_plane_data_rate) { - struct drm_crtc_state *cstate = &intel_cstate->base; - struct drm_atomic_state *state = cstate->state; + struct drm_atomic_state *state = crtc_state->base.state; struct drm_plane *plane; - const struct drm_plane_state *pstate; + const struct drm_plane_state *drm_plane_state; u64 total_data_rate = 0; if (WARN_ON(!state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { enum plane_id plane_id = to_intel_plane(plane)->id; + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); u64 rate; - const struct intel_plane_state *intel_pstate = - to_intel_plane_state(pstate); /* packed/y */ - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 0); + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); plane_data_rate[plane_id] = rate; total_data_rate += rate; /* uv-plane */ - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 1); + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); uv_plane_data_rate[plane_id] = rate; total_data_rate += rate; } @@ -4282,28 +4272,25 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, } static u64 -icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, +icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate) { - struct drm_crtc_state *cstate = &intel_cstate->base; - struct drm_atomic_state *state = cstate->state; struct drm_plane *plane; - const struct drm_plane_state *pstate; + const struct drm_plane_state *drm_plane_state; u64 total_data_rate = 0; - if (WARN_ON(!state)) + if (WARN_ON(!crtc_state->base.state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { - const struct intel_plane_state *intel_pstate = - to_intel_plane_state(pstate); + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); enum plane_id plane_id = to_intel_plane(plane)->id; u64 rate; - if (!intel_pstate->linked_plane) { - rate = skl_plane_relative_data_rate(intel_cstate, - 
intel_pstate, 0); + if (!plane_state->linked_plane) { + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); plane_data_rate[plane_id] = rate; total_data_rate += rate; } else { @@ -4316,18 +4303,16 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, * NULL if we try get_new_plane_state(), so we * always calculate from the master. */ - if (intel_pstate->slave) + if (plane_state->slave) continue; /* Y plane rate is calculated on the slave */ - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 0); - y_plane_id = intel_pstate->linked_plane->id; + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); + y_plane_id = plane_state->linked_plane->id; plane_data_rate[y_plane_id] = rate; total_data_rate += rate; - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 1); + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); plane_data_rate[plane_id] = rate; total_data_rate += rate; } @@ -4337,14 +4322,14 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, } static int -skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, +skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, struct skl_ddb_allocation *ddb /* out */) { - struct drm_atomic_state *state = cstate->base.state; - struct drm_crtc *crtc = cstate->base.crtc; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_crtc *crtc = crtc_state->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; + struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb; u16 alloc_size, start = 0; u16 total[I915_MAX_PLANES] = {}; u16 uv_total[I915_MAX_PLANES] = {}; @@ -4357,40 +4342,40 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, int level; /* Clear the partitioning for disabled planes. */ - memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y)); - memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv)); + memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); + memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv)); if (WARN_ON(!state)) return 0; - if (!cstate->base.active) { + if (!crtc_state->base.active) { alloc->start = alloc->end = 0; return 0; } if (INTEL_GEN(dev_priv) >= 11) total_data_rate = - icl_get_total_relative_data_rate(cstate, + icl_get_total_relative_data_rate(crtc_state, plane_data_rate); else total_data_rate = - skl_get_total_relative_data_rate(cstate, + skl_get_total_relative_data_rate(crtc_state, plane_data_rate, uv_plane_data_rate); - skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate, + skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate, ddb, alloc, &num_active); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) return 0; /* Allocate fixed number of blocks for cursor. 
*/ - total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active); + total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active); alloc_size -= total[PLANE_CURSOR]; - cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = + crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - total[PLANE_CURSOR]; - cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; + crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; if (total_data_rate == 0) return 0; @@ -4403,7 +4388,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, blocks = 0; for_each_plane_id_on_crtc(intel_crtc, plane_id) { const struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) { if (WARN_ON(wm->wm[level].min_ddb_alloc > @@ -4438,7 +4423,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, */ for_each_plane_id_on_crtc(intel_crtc, plane_id) { const struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; u64 rate; u16 extra; @@ -4477,9 +4462,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, start = alloc->start; for_each_plane_id_on_crtc(intel_crtc, plane_id) { struct skl_ddb_entry *plane_alloc = - &cstate->wm.skl.plane_ddb_y[plane_id]; + &crtc_state->wm.skl.plane_ddb_y[plane_id]; struct skl_ddb_entry *uv_plane_alloc = - &cstate->wm.skl.plane_ddb_uv[plane_id]; + &crtc_state->wm.skl.plane_ddb_uv[plane_id]; if (plane_id == PLANE_CURSOR) continue; @@ -4510,7 +4495,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, for (level++; level <= ilk_wm_max_level(dev_priv); level++) { for_each_plane_id_on_crtc(intel_crtc, plane_id) { struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; /* * We only disable the watermarks for each plane if @@ -4547,7 +4532,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, */ for_each_plane_id_on_crtc(intel_crtc, plane_id) { struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; if (wm->trans_wm.plane_res_b >= total[plane_id]) memset(&wm->trans_wm, 0, sizeof(wm->trans_wm)); @@ -4599,43 +4584,43 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, } static uint_fixed_16_16_t -intel_get_linetime_us(const struct intel_crtc_state *cstate) +intel_get_linetime_us(const struct intel_crtc_state *crtc_state) { u32 pixel_rate; u32 crtc_htotal; uint_fixed_16_16_t linetime_us; - if (!cstate->base.active) + if (!crtc_state->base.active) return u32_to_fixed16(0); - pixel_rate = cstate->pixel_rate; + pixel_rate = crtc_state->pixel_rate; if (WARN_ON(pixel_rate == 0)) return u32_to_fixed16(0); - crtc_htotal = cstate->base.adjusted_mode.crtc_htotal; + crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal; linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); return linetime_us; } static u32 -skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate) +skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { u64 adjusted_pixel_rate; uint_fixed_16_16_t downscale_amount; /* Shouldn't reach here on disabled planes... 
*/ - if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) + if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) return 0; /* * Adjusted plane pixel rate is just the pipe's adjusted pixel rate * with additional adjustments for plane-specific scaling. */ - adjusted_pixel_rate = cstate->pixel_rate; - downscale_amount = skl_plane_downscale_amount(cstate, pstate); + adjusted_pixel_rate = crtc_state->pixel_rate; + downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state); return mul_round_up_u32_fixed16(adjusted_pixel_rate, downscale_amount); @@ -4768,13 +4753,13 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) return level > 0; } -static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, +static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, int level, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); u32 latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; @@ -4800,14 +4785,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, wp->cpp, latency, wp->dbuf_block_size); method2 = skl_wm_method2(wp->plane_pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, + crtc_state->base.adjusted_mode.crtc_htotal, latency, wp->plane_blocks_per_line); if (wp->y_tiled) { selected_result = max_fixed16(method2, wp->y_tile_minimum); } else { - if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal / + if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal / wp->dbuf_block_size < 1) && (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; @@ -4894,18 +4879,18 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, } static void -skl_compute_wm_levels(const struct intel_crtc_state *cstate, +skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level *result_prev = &levels[0]; for (level = 0; level <= max_level; level++) { struct skl_wm_level *result = &levels[level]; - skl_compute_plane_wm(cstate, level, wm_params, + skl_compute_plane_wm(crtc_state, level, wm_params, result_prev, result); result_prev = result; @@ -4913,14 +4898,14 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate, } static u32 -skl_compute_linetime_wm(const struct intel_crtc_state *cstate) +skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state) { - struct drm_atomic_state *state = cstate->base.state; + struct drm_atomic_state *state = crtc_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->dev); uint_fixed_16_16_t linetime_us; u32 linetime_wm; - linetime_us = intel_get_linetime_us(cstate); + linetime_us = intel_get_linetime_us(crtc_state); linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us)); /* Display WA #1135: BXT:ALL GLK:ALL */ @@ -4930,11 +4915,11 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate) return linetime_wm; } -static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, 
+static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wp, struct skl_plane_wm *wm) { - struct drm_device *dev = cstate->base.crtc->dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const struct drm_i915_private *dev_priv = to_i915(dev); u16 trans_min, trans_y_tile_min; const u16 trans_amount = 10; /* This is configurable amount */ @@ -5092,13 +5077,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; } -static int skl_build_pipe_wm(struct intel_crtc_state *cstate) +static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); - struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; - struct drm_crtc_state *crtc_state = &cstate->base; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; struct drm_plane *plane; - const struct drm_plane_state *pstate; + const struct drm_plane_state *drm_plane_state; int ret; /* @@ -5107,19 +5091,20 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate) */ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { - const struct intel_plane_state *intel_pstate = - to_intel_plane_state(pstate); + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, + &crtc_state->base) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); if (INTEL_GEN(dev_priv) >= 11) - ret = icl_build_plane_wm(cstate, intel_pstate); + ret = icl_build_plane_wm(crtc_state, plane_state); else - ret = skl_build_plane_wm(cstate, intel_pstate); + ret = skl_build_plane_wm(crtc_state, plane_state); if (ret) return ret; } - pipe_wm->linetime = skl_compute_linetime_wm(cstate); + pipe_wm->linetime = skl_compute_linetime_wm(crtc_state); return 0; } @@ -5273,10 +5258,10 @@ static u32 pipes_modified(struct intel_atomic_state *state) { struct intel_crtc *crtc; - struct intel_crtc_state *cstate; + struct intel_crtc_state *crtc_state; u32 i, ret = 0; - for_each_new_intel_crtc_in_state(state, crtc, cstate, i) + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) ret |= drm_crtc_mask(&crtc->base); return ret; @@ -5652,11 +5637,11 @@ skl_compute_wm(struct intel_atomic_state *state) } static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; + struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; enum pipe pipe = crtc->pipe; if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) @@ -5666,9 +5651,9 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, } static void skl_initial_wm(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_values *results = &state->wm_results; @@ -5678,8 +5663,8 @@ static void skl_initial_wm(struct intel_atomic_state *state, 
mutex_lock(&dev_priv->wm.wm_mutex); - if (cstate->base.active_changed) - skl_atomic_update_crtc_wm(state, cstate); + if (crtc_state->base.active_changed) + skl_atomic_update_crtc_wm(state, crtc_state); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -5735,28 +5720,29 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) } static void ilk_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; + crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } static void ilk_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; mutex_lock(&dev_priv->wm.wm_mutex); - if (cstate->wm.need_postvbl_update) { - intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; - ilk_program_watermarks(dev_priv); - } + crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; + ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -5812,13 +5798,13 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) struct skl_ddb_values *hw = &dev_priv->wm.skl_hw; struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; struct intel_crtc *crtc; - struct intel_crtc_state *cstate; + struct intel_crtc_state *crtc_state; skl_ddb_get_hw_state(dev_priv, ddb); for_each_intel_crtc(&dev_priv->drm, crtc) { - cstate = to_intel_crtc_state(crtc->base.state); + crtc_state = to_intel_crtc_state(crtc->base.state); - skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal); + skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); if (crtc->active) hw->dirty_pipes |= drm_crtc_mask(&crtc->base); @@ -5835,8 +5821,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state); - struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; enum pipe pipe = crtc->pipe; static const i915_reg_t wm0_pipe_reg[] = { [PIPE_A] = WM0_PIPEA_ILK, @@ -6891,9 +6877,10 @@ void gen6_rps_boost(struct i915_request *rq) /* Serializes with i915_request_retire() */ boost = false; spin_lock_irqsave(&rq->lock, flags); - if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) { + if (!i915_request_has_waitboost(rq) && + !dma_fence_is_signaled_locked(&rq->fence)) { boost = !atomic_fetch_inc(&rps->num_waiters); - rq->waitboost = true; + rq->flags |= I915_REQUEST_WAITBOOST; } spin_unlock_irqrestore(&rq->lock, flags); if (!boost) @@ -7175,7 +7162,7 @@ static void 
gen11_enable_rc6(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC(dev_priv)) + if (HAS_GT_UC(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -7192,7 +7179,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv) * next request to execute. If the idle hysteresis is less than that * interrupt service latency, the hardware will automatically gate * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from intel_pstate is that we + * the service latency. A similar guide from plane_state is that we * do not want the enable hysteresis to less than the wakeup latency. * * igt/gem_exec_nop/sequential provides a rough estimate for the @@ -7256,7 +7243,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC(dev_priv)) + if (HAS_GT_UC(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -7271,7 +7258,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) * next request to execute. If the idle hysteresis is less than that * interrupt service latency, the hardware will automatically gate * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from intel_pstate is that we + * the service latency. A similar guide from plane_state is that we * do not want the enable hysteresis to less than the wakeup latency. * * igt/gem_exec_nop/sequential provides a rough estimate for the diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 1b489fa399e1..e3573e1e16e3 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -10,10 +10,10 @@ #include "i915_reg.h" -struct drm_atomic_state; struct drm_device; struct drm_i915_private; struct i915_request; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_plane; @@ -52,7 +52,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); -bool intel_can_enable_sagv(struct drm_atomic_state *state); +bool intel_can_enable_sagv(struct intel_atomic_state *state); int intel_enable_sagv(struct drm_i915_private *dev_priv); int intel_disable_sagv(struct drm_i915_private *dev_priv); bool skl_wm_level_equals(const struct skl_wm_level *l1, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 502c54428570..b2a05850ea42 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, static void dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) { - struct drm_printer p; + if (debug->count) { + struct drm_printer p = drm_debug_printer("i915"); - if (!debug->count) - return; - - p = drm_debug_printer("i915"); - __print_intel_runtime_pm_wakeref(&p, debug); + __print_intel_runtime_pm_wakeref(&p, debug); + } kfree(debug->owners); } @@ -594,7 +592,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm) pm_runtime_put(kdev); } -void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm) +void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm) { int count = 
atomic_read(&rpm->wakeref_count); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 2ee8f9522e05..ae64ff14c642 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -173,7 +173,7 @@ enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm) void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm); void intel_runtime_pm_enable(struct intel_runtime_pm *rpm); void intel_runtime_pm_disable(struct intel_runtime_pm *rpm); -void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm); +void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c deleted file mode 100644 index ae45651ac73c..000000000000 --- a/drivers/gpu/drm/i915/intel_uc.c +++ /dev/null @@ -1,561 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "gt/intel_reset.h" -#include "intel_uc.h" -#include "intel_guc.h" -#include "intel_guc_ads.h" -#include "intel_guc_submission.h" -#include "i915_drv.h" - -static void guc_free_load_err_log(struct intel_guc *guc); - -/* Reset GuC providing us with fresh state for both GuC and HuC. 
- */ -static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) -{ - int ret; - u32 guc_status; - - ret = intel_reset_guc(dev_priv); - if (ret) { - DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); - return ret; - } - - guc_status = I915_READ(GUC_STATUS); - WARN(!(guc_status & GS_MIA_IN_RESET), - "GuC status: 0x%x, MIA core expected to be in reset\n", - guc_status); - - return ret; -} - -static int __get_platform_enable_guc(struct drm_i915_private *i915) -{ - struct intel_uc_fw *guc_fw = &i915->guc.fw; - struct intel_uc_fw *huc_fw = &i915->huc.fw; - int enable_guc = 0; - - /* Default is to use HuC if we know GuC and HuC firmwares */ - if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw)) - enable_guc |= ENABLE_GUC_LOAD_HUC; - - /* Any platform specific fine-tuning can be done here */ - - return enable_guc; -} - -static int __get_default_guc_log_level(struct drm_i915_private *i915) -{ - int guc_log_level; - - if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915)) - guc_log_level = GUC_LOG_LEVEL_DISABLED; - else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || - IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - guc_log_level = GUC_LOG_LEVEL_MAX; - else - guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE; - - /* Any platform specific fine-tuning can be done here */ - - return guc_log_level; -} - -/** - * sanitize_options_early - sanitize uC related modparam options - * @i915: device private - * - * In case of "enable_guc" option this function will attempt to modify - * it only if it was initially set to "auto(-1)". Default value for this - * modparam varies between platforms and it is hardcoded in driver code. - * Any other modparam value is only monitored against availability of the - * related hardware or firmware definitions. - * - * In case of "guc_log_level" option this function will attempt to modify - * it only if it was initially set to "auto(-1)" or if initial value was - * "enable(1..4)" on platforms without the GuC. Default value for this - * modparam varies between platforms and is usually set to "disable(0)" - * unless GuC is enabled on given platform and the driver is compiled with - * debug config when this modparam will default to "enable(1..4)". - */ -static void sanitize_options_early(struct drm_i915_private *i915) -{ - struct intel_uc_fw *guc_fw = &i915->guc.fw; - struct intel_uc_fw *huc_fw = &i915->huc.fw; - - /* A negative value means "use platform default" */ - if (i915_modparams.enable_guc < 0) - i915_modparams.enable_guc = __get_platform_enable_guc(i915); - - DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", - i915_modparams.enable_guc, - yesno(intel_uc_is_using_guc_submission(i915)), - yesno(intel_uc_is_using_huc(i915))); - - /* Verify GuC firmware availability */ - if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - !HAS_GUC(i915) ? "no GuC hardware" : - "no GuC firmware"); - } - - /* Verify HuC firmware availability */ - if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - !HAS_HUC(i915) ? 
"no HuC hardware" : - "no HuC firmware"); - } - - /* XXX: GuC submission is unavailable for now */ - if (intel_uc_is_using_guc_submission(i915)) { - DRM_INFO("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - "GuC submission not supported"); - DRM_INFO("Switching to non-GuC submission mode!\n"); - i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; - } - - /* A negative value means "use platform/config default" */ - if (i915_modparams.guc_log_level < 0) - i915_modparams.guc_log_level = - __get_default_guc_log_level(i915); - - if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "guc_log_level", i915_modparams.guc_log_level, - !HAS_GUC(i915) ? "no GuC hardware" : - "GuC not enabled"); - i915_modparams.guc_log_level = 0; - } - - if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "guc_log_level", i915_modparams.guc_log_level, - "verbosity too high"); - i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX; - } - - DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n", - i915_modparams.guc_log_level, - yesno(i915_modparams.guc_log_level), - yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)), - GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level)); - - /* Make sure that sanitization was done */ - GEM_BUG_ON(i915_modparams.enable_guc < 0); - GEM_BUG_ON(i915_modparams.guc_log_level < 0); -} - -void intel_uc_init_early(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - - intel_guc_init_early(guc); - intel_huc_init_early(huc); - - sanitize_options_early(i915); -} - -void intel_uc_cleanup_early(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - guc_free_load_err_log(guc); -} - -/** - * intel_uc_init_mmio - setup uC MMIO access - * @i915: device private - * - * Setup minimal state necessary for MMIO accesses later in the - * initialization sequence. 
- */ -void intel_uc_init_mmio(struct drm_i915_private *i915) -{ - intel_guc_init_send_regs(&i915->guc); -} - -static void guc_capture_load_err_log(struct intel_guc *guc) -{ - if (!guc->log.vma || !intel_guc_log_get_level(&guc->log)) - return; - - if (!guc->load_err_log) - guc->load_err_log = i915_gem_object_get(guc->log.vma->obj); - - return; -} - -static void guc_free_load_err_log(struct intel_guc *guc) -{ - if (guc->load_err_log) - i915_gem_object_put(guc->load_err_log); -} - -static void guc_reset_interrupts(struct intel_guc *guc) -{ - guc->interrupts.reset(guc_to_i915(guc)); -} - -static void guc_enable_interrupts(struct intel_guc *guc) -{ - guc->interrupts.enable(guc_to_i915(guc)); -} - -static void guc_disable_interrupts(struct intel_guc *guc) -{ - guc->interrupts.disable(guc_to_i915(guc)); -} - -static int guc_enable_communication(struct intel_guc *guc) -{ - guc_enable_interrupts(guc); - - return intel_guc_ct_enable(&guc->ct); -} - -static void guc_stop_communication(struct intel_guc *guc) -{ - intel_guc_ct_stop(&guc->ct); - - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; -} - -static void guc_disable_communication(struct intel_guc *guc) -{ - intel_guc_ct_disable(&guc->ct); - - guc_disable_interrupts(guc); - - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; -} - -int intel_uc_init_misc(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - int ret; - - if (!USES_GUC(i915)) - return 0; - - ret = intel_guc_init_misc(guc); - if (ret) - return ret; - - if (USES_HUC(i915)) { - ret = intel_huc_init_misc(huc); - if (ret) - goto err_guc; - } - - return 0; - -err_guc: - intel_guc_fini_misc(guc); - return ret; -} - -void intel_uc_fini_misc(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - - if (!USES_GUC(i915)) - return; - - if (USES_HUC(i915)) - intel_huc_fini_misc(huc); - - intel_guc_fini_misc(guc); -} - -int intel_uc_init(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - int ret; - - if (!USES_GUC(i915)) - return 0; - - if (!HAS_GUC(i915)) - return -ENODEV; - - /* XXX: GuC submission is unavailable for now */ - GEM_BUG_ON(USES_GUC_SUBMISSION(i915)); - - ret = intel_guc_init(guc); - if (ret) - return ret; - - if (USES_HUC(i915)) { - ret = intel_huc_init(huc); - if (ret) - goto err_guc; - } - - if (USES_GUC_SUBMISSION(i915)) { - /* - * This is stuff we need to have available at fw load time - * if we are planning to enable submission later - */ - ret = intel_guc_submission_init(guc); - if (ret) - goto err_huc; - } - - return 0; - -err_huc: - if (USES_HUC(i915)) - intel_huc_fini(huc); -err_guc: - intel_guc_fini(guc); - return ret; -} - -void intel_uc_fini(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - if (!USES_GUC(i915)) - return; - - GEM_BUG_ON(!HAS_GUC(i915)); - - if (USES_GUC_SUBMISSION(i915)) - intel_guc_submission_fini(guc); - - if (USES_HUC(i915)) - intel_huc_fini(&i915->huc); - - intel_guc_fini(guc); -} - -static void __uc_sanitize(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - - GEM_BUG_ON(!HAS_GUC(i915)); - - intel_huc_sanitize(huc); - intel_guc_sanitize(guc); - - __intel_uc_reset_hw(i915); -} - -void intel_uc_sanitize(struct drm_i915_private *i915) -{ - if (!USES_GUC(i915)) - return; - - __uc_sanitize(i915); -} - -int 
intel_uc_init_hw(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - int ret, attempts; - - if (!USES_GUC(i915)) - return 0; - - GEM_BUG_ON(!HAS_GUC(i915)); - - guc_reset_interrupts(guc); - - /* WaEnableuKernelHeaderValidFix:skl */ - /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ - if (IS_GEN(i915, 9)) - attempts = 3; - else - attempts = 1; - - while (attempts--) { - /* - * Always reset the GuC just before (re)loading, so - * that the state and timing are fairly predictable - */ - ret = __intel_uc_reset_hw(i915); - if (ret) - goto err_out; - - if (USES_HUC(i915)) { - ret = intel_huc_fw_upload(huc); - if (ret) - goto err_out; - } - - intel_guc_ads_reset(guc); - intel_guc_init_params(guc); - ret = intel_guc_fw_upload(guc); - if (ret == 0) - break; - - DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " - "retry %d more time(s)\n", ret, attempts); - } - - /* Did we succeded or run out of retries? */ - if (ret) - goto err_log_capture; - - ret = guc_enable_communication(guc); - if (ret) - goto err_log_capture; - - if (USES_HUC(i915)) { - ret = intel_huc_auth(huc); - if (ret) - goto err_communication; - } - - ret = intel_guc_sample_forcewake(guc); - if (ret) - goto err_communication; - - if (USES_GUC_SUBMISSION(i915)) { - ret = intel_guc_submission_enable(guc); - if (ret) - goto err_communication; - } - - dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", - guc->fw.major_ver_found, guc->fw.minor_ver_found); - dev_info(i915->drm.dev, "GuC submission %s\n", - enableddisabled(USES_GUC_SUBMISSION(i915))); - dev_info(i915->drm.dev, "HuC %s\n", - enableddisabled(USES_HUC(i915))); - - return 0; - - /* - * We've failed to load the firmware :( - */ -err_communication: - guc_disable_communication(guc); -err_log_capture: - guc_capture_load_err_log(guc); -err_out: - __uc_sanitize(i915); - - /* - * Note that there is no fallback as either user explicitly asked for - * the GuC or driver default option was to run with the GuC enabled. - */ - if (GEM_WARN_ON(ret == -EIO)) - ret = -EINVAL; - - dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret); - return ret; -} - -void intel_uc_fini_hw(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - if (!intel_guc_is_loaded(guc)) - return; - - GEM_BUG_ON(!HAS_GUC(i915)); - - if (USES_GUC_SUBMISSION(i915)) - intel_guc_submission_disable(guc); - - guc_disable_communication(guc); - __uc_sanitize(i915); -} - -/** - * intel_uc_reset_prepare - Prepare for reset - * @i915: device private - * - * Preparing for full gpu reset. 
- */ -void intel_uc_reset_prepare(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - if (!intel_guc_is_loaded(guc)) - return; - - guc_stop_communication(guc); - __uc_sanitize(i915); -} - -void intel_uc_runtime_suspend(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - int err; - - if (!intel_guc_is_loaded(guc)) - return; - - err = intel_guc_suspend(guc); - if (err) - DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); - - guc_disable_communication(guc); -} - -void intel_uc_suspend(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - intel_wakeref_t wakeref; - - if (!intel_guc_is_loaded(guc)) - return; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - intel_uc_runtime_suspend(i915); -} - -int intel_uc_resume(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - int err; - - if (!intel_guc_is_loaded(guc)) - return 0; - - guc_enable_communication(guc); - - err = intel_guc_resume(guc); - if (err) { - DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); - return err; - } - - return 0; -} diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c deleted file mode 100644 index f342ddd47df8..000000000000 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Copyright © 2016-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include <linux/bitfield.h> -#include <linux/firmware.h> -#include <drm/drm_print.h> - -#include "intel_uc_fw.h" -#include "i915_drv.h" - -/** - * intel_uc_fw_fetch - fetch uC firmware - * - * @dev_priv: device private - * @uc_fw: uC firmware - * - * Fetch uC firmware into GEM obj. 
- */ -void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, - struct intel_uc_fw *uc_fw) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - struct drm_i915_gem_object *obj; - const struct firmware *fw = NULL; - struct uc_css_header *css; - size_t size; - int err; - - if (!uc_fw->path) { - dev_info(dev_priv->drm.dev, - "%s: No firmware was defined for %s!\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_platform_name(INTEL_INFO(dev_priv)->platform)); - return; - } - - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - - uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); - - err = request_firmware(&fw, uc_fw->path, &pdev->dev); - if (err) { - DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); - goto fail; - } - - DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n", - intel_uc_fw_type_repr(uc_fw->type), fw->size, fw); - - /* Check the size of the blob before examining buffer contents */ - if (fw->size < sizeof(struct uc_css_header)) { - DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n", - intel_uc_fw_type_repr(uc_fw->type), - fw->size, sizeof(struct uc_css_header)); - err = -ENODATA; - goto fail; - } - - css = (struct uc_css_header *)fw->data; - - /* Firmware bits always start from header */ - uc_fw->header_offset = 0; - uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw - - css->key_size_dw - css->exponent_size_dw) * - sizeof(u32); - - if (uc_fw->header_size != sizeof(struct uc_css_header)) { - DRM_WARN("%s: Mismatched firmware header definition\n", - intel_uc_fw_type_repr(uc_fw->type)); - err = -ENOEXEC; - goto fail; - } - - /* then, uCode */ - uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size; - uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); - - /* now RSA */ - if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) { - DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n", - intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw); - err = -ENOEXEC; - goto fail; - } - uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size; - uc_fw->rsa_size = css->key_size_dw * sizeof(u32); - - /* At least, it should have header, uCode and RSA. Size of all three. 
*/ - size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size; - if (fw->size < size) { - DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n", - intel_uc_fw_type_repr(uc_fw->type), fw->size, size); - err = -ENOEXEC; - goto fail; - } - - /* Get version numbers from the CSS header */ - switch (uc_fw->type) { - case INTEL_UC_FW_TYPE_GUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR, - css->sw_version); - break; - - case INTEL_UC_FW_TYPE_HUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR, - css->sw_version); - break; - - default: - MISSING_CASE(uc_fw->type); - break; - } - - DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n", - intel_uc_fw_type_repr(uc_fw->type), - uc_fw->major_ver_found, uc_fw->minor_ver_found, - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); - - if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) { - DRM_NOTE("%s: Skipping firmware version check\n", - intel_uc_fw_type_repr(uc_fw->type)); - } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || - uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { - DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n", - intel_uc_fw_type_repr(uc_fw->type), - uc_fw->major_ver_found, uc_fw->minor_ver_found, - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); - err = -ENOEXEC; - goto fail; - } - - obj = i915_gem_object_create_shmem_from_data(dev_priv, - fw->data, fw->size); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - DRM_DEBUG_DRIVER("%s fw object_create err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); - goto fail; - } - - uc_fw->obj = obj; - uc_fw->size = fw->size; - uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); - - release_firmware(fw); - return; - -fail: - uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); - - DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); - DRM_INFO("%s: Firmware can be downloaded from %s\n", - intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); - - release_firmware(fw); /* OK even if fw is NULL */ -} - -static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - struct i915_vma dummy = { - .node.start = intel_uc_fw_ggtt_offset(uc_fw), - .node.size = obj->base.size, - .pages = obj->mm.pages, - .vm = &ggtt->vm, - }; - - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size); - - /* uc_fw->obj cache domains were not controlled across suspend */ - drm_clflush_sg(dummy.pages); - - ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0); -} - -static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - u64 start = intel_uc_fw_ggtt_offset(uc_fw); - - ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); -} - -/** - * intel_uc_fw_upload - load uC firmware using custom loader - * @uc_fw: uC firmware - * @xfer: custom uC firmware loader function - * - * Loads uC firmware using custom 
loader and updates internal flags. - * - * Return: 0 on success, non-zero on failure. - */ -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, - int (*xfer)(struct intel_uc_fw *uc_fw)) -{ - int err; - - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; - - uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); - - /* Call custom loader */ - intel_uc_fw_ggtt_bind(uc_fw); - err = xfer(uc_fw); - intel_uc_fw_ggtt_unbind(uc_fw); - if (err) - goto fail; - - uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); - - DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n", - intel_uc_fw_type_repr(uc_fw->type), - uc_fw->path, - uc_fw->major_ver_found, uc_fw->minor_ver_found); - - return 0; - -fail: - uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); - - DRM_WARN("%s: Failed to load firmware %s (error %d)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); - - return err; -} - -int intel_uc_fw_init(struct intel_uc_fw *uc_fw) -{ - int err; - - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; - - err = i915_gem_object_pin_pages(uc_fw->obj); - if (err) - DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); - - return err; -} - -void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) -{ - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return; - - i915_gem_object_unpin_pages(uc_fw->obj); -} - -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev); - struct i915_ggtt *ggtt = &i915->ggtt; - struct drm_mm_node *node = &ggtt->uc_fw; - - GEM_BUG_ON(!node->allocated); - GEM_BUG_ON(upper_32_bits(node->start)); - GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); - - return lower_32_bits(node->start); -} - -/** - * intel_uc_fw_cleanup_fetch - cleanup uC firmware - * - * @uc_fw: uC firmware - * - * Cleans up uC firmware by releasing the firmware GEM obj. - */ -void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_gem_object *obj; - - obj = fetch_and_zero(&uc_fw->obj); - if (obj) - i915_gem_object_put(obj); - - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE; -} - -/** - * intel_uc_fw_dump - dump information about uC firmware - * @uc_fw: uC firmware - * @p: the &drm_printer - * - * Pretty printer for uC firmware. 
- */ -void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) -{ - drm_printf(p, "%s firmware: %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - drm_printf(p, "\tstatus: fetch %s, load %s\n", - intel_uc_fw_status_repr(uc_fw->fetch_status), - intel_uc_fw_status_repr(uc_fw->load_status)); - drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, - uc_fw->major_ver_found, uc_fw->minor_ver_found); - drm_printf(p, "\theader: offset %u, size %u\n", - uc_fw->header_offset, uc_fw->header_size); - drm_printf(p, "\tuCode: offset %u, size %u\n", - uc_fw->ucode_offset, uc_fw->ucode_size); - drm_printf(p, "\tRSA: offset %u, size %u\n", - uc_fw->rsa_offset, uc_fw->rsa_size); -} diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index da33aa672c3d..475ab3d4d91d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -78,6 +78,8 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d) static inline void fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) { + GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask); + d->uncore->fw_domains_timer |= d->mask; d->wake_count++; hrtimer_start_range_ns(&d->timer, NSEC_PER_MSEC, @@ -322,7 +324,7 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore) /* On VLV, FIFO will be shared by both SW and HW. * So, we need to read the FREE_ENTRIES everytime */ - if (IS_VALLEYVIEW(uncore_to_i915(uncore))) + if (IS_VALLEYVIEW(uncore->i915)) n = fifo_free_entries(uncore); else n = uncore->fifo_count; @@ -344,7 +346,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) { struct intel_uncore_forcewake_domain *domain = container_of(timer, struct intel_uncore_forcewake_domain, timer); - struct intel_uncore *uncore = forcewake_domain_to_uncore(domain); + struct intel_uncore *uncore = domain->uncore; unsigned long irqflags; assert_rpm_device_not_suspended(uncore->rpm); @@ -353,9 +355,10 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) return HRTIMER_RESTART; spin_lock_irqsave(&uncore->lock, irqflags); - if (WARN_ON(domain->wake_count == 0)) - domain->wake_count++; + uncore->fw_domains_timer &= ~domain->mask; + + GEM_BUG_ON(!domain->wake_count); if (--domain->wake_count == 0) uncore->funcs.force_wake_put(uncore, domain->mask); @@ -485,15 +488,13 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore) return ret; } -static void __intel_uncore_early_sanitize(struct intel_uncore *uncore, - unsigned int restore_forcewake) +static void forcewake_early_sanitize(struct intel_uncore *uncore, + unsigned int restore_forcewake) { - /* clear out unclaimed reg detection bit */ - if (check_for_unclaimed_mmio(uncore)) - DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(uncore_to_i915(uncore))) { + if (IS_CHERRYVIEW(uncore->i915)) { __raw_uncore_write32(uncore, GTFIFOCTL, __raw_uncore_read32(uncore, GTFIFOCTL) | GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | @@ -515,6 +516,9 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore, void intel_uncore_suspend(struct intel_uncore *uncore) { + if (!intel_uncore_has_forcewake(uncore)) + return; + iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( &uncore->pmic_bus_access_nb); @@ -526,21 +530,24 @@ void intel_uncore_resume_early(struct intel_uncore *uncore) { unsigned int restore_forcewake; + if 
(intel_uncore_unclaimed_mmio(uncore)) + DRM_DEBUG("unclaimed mmio detected on resume, clearing\n"); + + if (!intel_uncore_has_forcewake(uncore)) + return; + restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved); - __intel_uncore_early_sanitize(uncore, restore_forcewake); + forcewake_early_sanitize(uncore, restore_forcewake); iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } void intel_uncore_runtime_resume(struct intel_uncore *uncore) { - iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); -} + if (!intel_uncore_has_forcewake(uncore)) + return; -void intel_uncore_sanitize(struct drm_i915_private *dev_priv) -{ - /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_sanitize_gt_powersave(dev_priv); + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } static void __intel_uncore_forcewake_get(struct intel_uncore *uncore, @@ -628,7 +635,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) spin_lock_irq(&uncore->lock); if (!--uncore->user_forcewake.count) { if (intel_uncore_unclaimed_mmio(uncore)) - dev_info(uncore_to_i915(uncore)->drm.dev, + dev_info(uncore->i915->drm.dev, "Invalid mmio detected during user access\n"); uncore->unclaimed_mmio_check = @@ -669,8 +676,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, fw_domains &= uncore->fw_domains; for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { - if (WARN_ON(domain->wake_count == 0)) - continue; + GEM_BUG_ON(!domain->wake_count); if (--domain->wake_count) { domain->active = true; @@ -734,15 +740,42 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore) void assert_forcewakes_active(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { + struct intel_uncore_forcewake_domain *domain; + unsigned int tmp; + + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) + return; + if (!uncore->funcs.force_wake_get) return; + spin_lock_irq(&uncore->lock); + assert_rpm_wakelock_held(uncore->rpm); fw_domains &= uncore->fw_domains; WARN(fw_domains & ~uncore->fw_domains_active, "Expected %08x fw_domains to be active, but %08x are off\n", fw_domains, fw_domains & ~uncore->fw_domains_active); + + /* + * Check that the caller has an explicit wakeref and we don't mistake + * it for the auto wakeref. + */ + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + unsigned int actual = READ_ONCE(domain->wake_count); + unsigned int expect = 1; + + if (uncore->fw_domains_timer & domain->mask) + expect++; /* pending automatic release */ + + if (WARN(actual < expect, + "Expected domain %d to be held awake by caller, count=%d\n", + domain->id, actual)) + break; + } + + spin_unlock_irq(&uncore->lock); } /* We give fast paths for the really cool registers */ @@ -901,6 +934,12 @@ static bool is_gen##x##_shadowed(u32 offset) \ __is_genX_shadowed(8) __is_genX_shadowed(11) +static enum forcewake_domains +gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) +{ + return FORCEWAKE_RENDER; +} + #define __gen8_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ @@ -1123,8 +1162,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore, static inline void __force_wake_auto(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - if (WARN_ON(!fw_domains)) - return; + GEM_BUG_ON(!fw_domains); /* Turn on all requested but inactive supported forcewake domains. 
*/ fw_domains &= uncore->fw_domains; @@ -1145,26 +1183,23 @@ func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ val = __raw_uncore_read##x(uncore, reg); \ GEN6_READ_FOOTER; \ } -#define __gen6_read(x) __gen_read(gen6, x) -#define __fwtable_read(x) __gen_read(fwtable, x) -#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x) - -__gen11_fwtable_read(8) -__gen11_fwtable_read(16) -__gen11_fwtable_read(32) -__gen11_fwtable_read(64) -__fwtable_read(8) -__fwtable_read(16) -__fwtable_read(32) -__fwtable_read(64) -__gen6_read(8) -__gen6_read(16) -__gen6_read(32) -__gen6_read(64) - -#undef __gen11_fwtable_read -#undef __fwtable_read -#undef __gen6_read + +#define __gen_reg_read_funcs(func) \ +static enum forcewake_domains \ +func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ + return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ +} \ +\ +__gen_read(func, 8) \ +__gen_read(func, 16) \ +__gen_read(func, 32) \ +__gen_read(func, 64) + +__gen_reg_read_funcs(gen11_fwtable); +__gen_reg_read_funcs(fwtable); +__gen_reg_read_funcs(gen6); + +#undef __gen_reg_read_funcs #undef GEN6_READ_FOOTER #undef GEN6_READ_HEADER @@ -1225,6 +1260,9 @@ gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } +__gen6_write(8) +__gen6_write(16) +__gen6_write(32) #define __gen_write(func, x) \ static void \ @@ -1237,38 +1275,33 @@ func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trac __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } -#define __gen8_write(x) __gen_write(gen8, x) -#define __fwtable_write(x) __gen_write(fwtable, x) -#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x) - -__gen11_fwtable_write(8) -__gen11_fwtable_write(16) -__gen11_fwtable_write(32) -__fwtable_write(8) -__fwtable_write(16) -__fwtable_write(32) -__gen8_write(8) -__gen8_write(16) -__gen8_write(32) -__gen6_write(8) -__gen6_write(16) -__gen6_write(32) -#undef __gen11_fwtable_write -#undef __fwtable_write -#undef __gen8_write -#undef __gen6_write +#define __gen_reg_write_funcs(func) \ +static enum forcewake_domains \ +func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ + return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ +} \ +\ +__gen_write(func, 8) \ +__gen_write(func, 16) \ +__gen_write(func, 32) + +__gen_reg_write_funcs(gen11_fwtable); +__gen_reg_write_funcs(fwtable); +__gen_reg_write_funcs(gen8); + +#undef __gen_reg_write_funcs #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER -#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ +#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ do { \ (uncore)->funcs.mmio_writeb = x##_write8; \ (uncore)->funcs.mmio_writew = x##_write16; \ (uncore)->funcs.mmio_writel = x##_write32; \ } while (0) -#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ +#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \ do { \ (uncore)->funcs.mmio_readb = x##_read8; \ (uncore)->funcs.mmio_readw = x##_read16; \ @@ -1276,24 +1309,39 @@ do { \ (uncore)->funcs.mmio_readq = x##_read64; \ } while (0) +#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ +do { \ + ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \ + (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \ +} while (0) + +#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ +do { \ + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \ + (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \ +} while (0) -static void fw_domain_init(struct 
intel_uncore *uncore, - enum forcewake_domain_id domain_id, - i915_reg_t reg_set, - i915_reg_t reg_ack) +static int __fw_domain_init(struct intel_uncore *uncore, + enum forcewake_domain_id domain_id, + i915_reg_t reg_set, + i915_reg_t reg_ack) { struct intel_uncore_forcewake_domain *d; - if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) - return; + GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); + GEM_BUG_ON(uncore->fw_domain[domain_id]); - d = &uncore->fw_domain[domain_id]; + if (i915_inject_probe_failure()) + return -ENOMEM; - WARN_ON(d->wake_count); + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; WARN_ON(!i915_mmio_reg_valid(reg_set)); WARN_ON(!i915_mmio_reg_valid(reg_ack)); + d->uncore = uncore; d->wake_count = 0; d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); @@ -1310,7 +1358,6 @@ static void fw_domain_init(struct intel_uncore *uncore, BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0)); BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1)); - d->mask = BIT(domain_id); hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -1319,6 +1366,10 @@ static void fw_domain_init(struct intel_uncore *uncore, uncore->fw_domains |= BIT(domain_id); fw_domain_reset(d); + + uncore->fw_domain[domain_id] = d; + + return 0; } static void fw_domain_fini(struct intel_uncore *uncore, @@ -1326,30 +1377,41 @@ static void fw_domain_fini(struct intel_uncore *uncore, { struct intel_uncore_forcewake_domain *d; - if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) - return; + GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); - d = &uncore->fw_domain[domain_id]; + d = fetch_and_zero(&uncore->fw_domain[domain_id]); + if (!d) + return; + uncore->fw_domains &= ~BIT(domain_id); WARN_ON(d->wake_count); WARN_ON(hrtimer_cancel(&d->timer)); - memset(d, 0, sizeof(*d)); + kfree(d); +} - uncore->fw_domains &= ~BIT(domain_id); +static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore) +{ + struct intel_uncore_forcewake_domain *d; + int tmp; + + for_each_fw_domain(d, uncore, tmp) + fw_domain_fini(uncore, d->id); } -static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) +static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct drm_i915_private *i915 = uncore->i915; + int ret = 0; - if (!intel_uncore_has_forcewake(uncore)) - return; + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); + +#define fw_domain_init(uncore__, id__, set__, ack__) \ + (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__)))) if (INTEL_GEN(i915) >= 11) { int i; - uncore->funcs.force_wake_get = - fw_domains_get_with_fallback; + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; uncore->funcs.force_wake_put = fw_domains_put; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, @@ -1357,6 +1419,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, FORCEWAKE_BLITTER_GEN9, FORCEWAKE_ACK_BLITTER_GEN9); + for (i = 0; i < I915_MAX_VCS; i++) { if (!HAS_ENGINE(i915, _VCS(i))) continue; @@ -1374,8 +1437,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); } } else if (IS_GEN_RANGE(i915, 9, 10)) { - uncore->funcs.force_wake_get = - fw_domains_get_with_fallback; + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; uncore->funcs.force_wake_put = fw_domains_put; 
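		/*
		 * Note: within this function fw_domain_init() is the local
		 * error-accumulating wrapper defined above, roughly
		 *
		 *	(ret ?: (ret = __fw_domain_init(uncore, id, set, ack)))
		 *
		 * so once one domain fails to allocate the remaining calls are
		 * no-ops and the "out:" label below unwinds whatever was
		 * created via intel_uncore_fw_domains_fini().
		 */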
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, @@ -1424,8 +1486,10 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) __raw_uncore_write32(uncore, FORCEWAKE, 0); __raw_posting_read(uncore, ECOBUS); - fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, - FORCEWAKE_MT, FORCEWAKE_MT_ACK); + ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, + FORCEWAKE_MT, FORCEWAKE_MT_ACK); + if (ret) + goto out; spin_lock_irq(&uncore->lock); fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER); @@ -1436,6 +1500,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); DRM_INFO("when using vblank-synced partial screen updates.\n"); + fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER); fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } @@ -1447,8 +1512,16 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE, FORCEWAKE_ACK); } +#undef fw_domain_init + /* All future platforms are expected to require complex power gating */ - WARN_ON(uncore->fw_domains == 0); + WARN_ON(!ret && uncore->fw_domains == 0); + +out: + if (ret) + intel_uncore_fw_domains_fini(uncore); + + return ret; } #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \ @@ -1493,7 +1566,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, static int uncore_mmio_setup(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct drm_i915_private *i915 = uncore->i915; struct pci_dev *pdev = i915->drm.pdev; int mmio_bar; int mmio_size; @@ -1523,49 +1596,46 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) static void uncore_mmio_cleanup(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = uncore->i915->drm.pdev; pci_iounmap(pdev, uncore->regs); } -void intel_uncore_init_early(struct intel_uncore *uncore) +void intel_uncore_init_early(struct intel_uncore *uncore, + struct drm_i915_private *i915) { spin_lock_init(&uncore->lock); + uncore->i915 = i915; + uncore->rpm = &i915->runtime_pm; } -int intel_uncore_init_mmio(struct intel_uncore *uncore) +static void uncore_raw_init(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); - int ret; + GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); - ret = uncore_mmio_setup(uncore); - if (ret) - return ret; - - i915_check_vgpu(i915); + if (IS_GEN(uncore->i915, 5)) { + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); + } else { + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); + } +} - if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) - uncore->flags |= UNCORE_HAS_FORCEWAKE; +static int uncore_forcewake_init(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore->i915; + int ret; - intel_uncore_fw_domains_init(uncore); - __intel_uncore_early_sanitize(uncore, 0); + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); - uncore->unclaimed_mmio_check = 1; - uncore->pmic_bus_access_nb.notifier_call = - i915_pmic_bus_access_notifier; + ret = intel_uncore_fw_domains_init(uncore); + if (ret) + return ret; - uncore->rpm = &i915->runtime_pm; + forcewake_early_sanitize(uncore, 0); - if (!intel_uncore_has_forcewake(uncore)) { - if (IS_GEN(i915, 5)) { - ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5); - ASSIGN_READ_MMIO_VFUNCS(uncore, 
gen5); - } else { - ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen2); - } - } else if (IS_GEN_RANGE(i915, 6, 7)) { + if (IS_GEN_RANGE(i915, 6, 7)) { ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); if (IS_VALLEYVIEW(i915)) { @@ -1579,7 +1649,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); - } else { ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); @@ -1594,6 +1663,40 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); } + uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); + + return 0; +} + +int intel_uncore_init_mmio(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore->i915; + int ret; + + ret = uncore_mmio_setup(uncore); + if (ret) + return ret; + + if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) + uncore->flags |= UNCORE_HAS_FORCEWAKE; + + uncore->unclaimed_mmio_check = 1; + + if (!intel_uncore_has_forcewake(uncore)) { + uncore_raw_init(uncore); + } else { + ret = uncore_forcewake_init(uncore); + if (ret) + goto out_mmio_cleanup; + } + + /* make sure fw funcs are set if and only if we have fw*/ + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put); + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains); + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains); + if (HAS_FPGA_DBG_UNCLAIMED(i915)) uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; @@ -1603,9 +1706,16 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) if (IS_GEN_RANGE(i915, 6, 7)) uncore->flags |= UNCORE_HAS_FIFO; - iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); + /* clear out unclaimed reg detection bit */ + if (check_for_unclaimed_mmio(uncore)) + DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); return 0; + +out_mmio_cleanup: + uncore_mmio_cleanup(uncore); + + return ret; } /* @@ -1615,45 +1725,46 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) */ void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct drm_i915_private *i915 = uncore->i915; + enum forcewake_domains fw_domains = uncore->fw_domains; + enum forcewake_domain_id domain_id; + int i; - if (INTEL_GEN(i915) >= 11) { - enum forcewake_domains fw_domains = uncore->fw_domains; - enum forcewake_domain_id domain_id; - int i; + if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11) + return; - for (i = 0; i < I915_MAX_VCS; i++) { - domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; + for (i = 0; i < I915_MAX_VCS; i++) { + domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; - if (HAS_ENGINE(i915, _VCS(i))) - continue; + if (HAS_ENGINE(i915, _VCS(i))) + continue; - if (fw_domains & BIT(domain_id)) - fw_domain_fini(uncore, domain_id); - } + if (fw_domains & BIT(domain_id)) + fw_domain_fini(uncore, domain_id); + } - for (i = 0; i < I915_MAX_VECS; i++) { - domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; + for (i = 0; i < I915_MAX_VECS; i++) { + domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; - if (HAS_ENGINE(i915, _VECS(i))) - continue; + if (HAS_ENGINE(i915, _VECS(i))) + continue; - if (fw_domains & BIT(domain_id)) - 
fw_domain_fini(uncore, domain_id); - } + if (fw_domains & BIT(domain_id)) + fw_domain_fini(uncore, domain_id); } } void intel_uncore_fini_mmio(struct intel_uncore *uncore) { - /* Paranoia: make sure we have disabled everything before we exit. */ - intel_uncore_sanitize(uncore_to_i915(uncore)); + if (intel_uncore_has_forcewake(uncore)) { + iosf_mbi_punit_acquire(); + iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( + &uncore->pmic_bus_access_nb); + intel_uncore_forcewake_reset(uncore); + intel_uncore_fw_domains_fini(uncore); + iosf_mbi_punit_release(); + } - iosf_mbi_punit_acquire(); - iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( - &uncore->pmic_bus_access_nb); - intel_uncore_forcewake_reset(uncore); - iosf_mbi_punit_release(); uncore_mmio_cleanup(uncore); } @@ -1871,62 +1982,6 @@ out: return ret; } -static enum forcewake_domains -intel_uncore_forcewake_for_read(struct intel_uncore *uncore, - i915_reg_t reg) -{ - struct drm_i915_private *i915 = uncore_to_i915(uncore); - u32 offset = i915_mmio_reg_offset(reg); - enum forcewake_domains fw_domains; - - if (INTEL_GEN(i915) >= 11) { - fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset); - } else if (HAS_FWTABLE(i915)) { - fw_domains = __fwtable_reg_read_fw_domains(uncore, offset); - } else if (INTEL_GEN(i915) >= 6) { - fw_domains = __gen6_reg_read_fw_domains(uncore, offset); - } else { - /* on devices with FW we expect to hit one of the above cases */ - if (intel_uncore_has_forcewake(uncore)) - MISSING_CASE(INTEL_GEN(i915)); - - fw_domains = 0; - } - - WARN_ON(fw_domains & ~uncore->fw_domains); - - return fw_domains; -} - -static enum forcewake_domains -intel_uncore_forcewake_for_write(struct intel_uncore *uncore, - i915_reg_t reg) -{ - struct drm_i915_private *i915 = uncore_to_i915(uncore); - u32 offset = i915_mmio_reg_offset(reg); - enum forcewake_domains fw_domains; - - if (INTEL_GEN(i915) >= 11) { - fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset); - } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) { - fw_domains = __fwtable_reg_write_fw_domains(uncore, offset); - } else if (IS_GEN(i915, 8)) { - fw_domains = __gen8_reg_write_fw_domains(uncore, offset); - } else if (IS_GEN_RANGE(i915, 6, 7)) { - fw_domains = FORCEWAKE_RENDER; - } else { - /* on devices with FW we expect to hit one of the above cases */ - if (intel_uncore_has_forcewake(uncore)) - MISSING_CASE(INTEL_GEN(i915)); - - fw_domains = 0; - } - - WARN_ON(fw_domains & ~uncore->fw_domains); - - return fw_domains; -} - /** * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access * a register @@ -1953,10 +2008,12 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, return 0; if (op & FW_REG_READ) - fw_domains = intel_uncore_forcewake_for_read(uncore, reg); + fw_domains = uncore->funcs.read_fw_domains(uncore, reg); if (op & FW_REG_WRITE) - fw_domains |= intel_uncore_forcewake_for_write(uncore, reg); + fw_domains |= uncore->funcs.write_fw_domains(uncore, reg); + + WARN_ON(fw_domains & ~uncore->fw_domains); return fw_domains; } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 804a0faacc91..2f6ffa309669 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -70,6 +70,11 @@ struct intel_uncore_funcs { void (*force_wake_put)(struct intel_uncore *uncore, enum forcewake_domains domains); + enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore, + i915_reg_t r); + enum forcewake_domains (*write_fw_domains)(struct 
intel_uncore *uncore, + i915_reg_t r); + u8 (*mmio_readb)(struct intel_uncore *uncore, i915_reg_t r, bool trace); u16 (*mmio_readw)(struct intel_uncore *uncore, @@ -97,6 +102,7 @@ struct intel_forcewake_range { struct intel_uncore { void __iomem *regs; + struct drm_i915_private *i915; struct intel_runtime_pm *rpm; spinlock_t lock; /** lock is also taken in irq contexts. */ @@ -117,9 +123,11 @@ struct intel_uncore { enum forcewake_domains fw_domains; enum forcewake_domains fw_domains_active; + enum forcewake_domains fw_domains_timer; enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */ struct intel_uncore_forcewake_domain { + struct intel_uncore *uncore; enum forcewake_domain_id id; enum forcewake_domains mask; unsigned int wake_count; @@ -127,7 +135,7 @@ struct intel_uncore { struct hrtimer timer; u32 __iomem *reg_set; u32 __iomem *reg_ack; - } fw_domain[FW_DOMAIN_ID_COUNT]; + } *fw_domain[FW_DOMAIN_ID_COUNT]; struct { unsigned int count; @@ -141,18 +149,12 @@ struct intel_uncore { /* Iterate over initialised fw domains */ #define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \ - for (tmp__ = (mask__); \ - tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) + for (tmp__ = (mask__); tmp__ ;) \ + for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)]) #define for_each_fw_domain(domain__, uncore__, tmp__) \ for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__) -static inline struct intel_uncore * -forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d) -{ - return container_of(d, struct intel_uncore, fw_domain[d->id]); -} - static inline bool intel_uncore_has_forcewake(const struct intel_uncore *uncore) { @@ -177,8 +179,8 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore) return uncore->flags & UNCORE_HAS_FIFO; } -void intel_uncore_sanitize(struct drm_i915_private *dev_priv); -void intel_uncore_init_early(struct intel_uncore *uncore); +void intel_uncore_init_early(struct intel_uncore *uncore, + struct drm_i915_private *i915); int intel_uncore_init_mmio(struct intel_uncore *uncore); void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore); bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 3db6fa682823..06bd8b215cc2 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -5,7 +5,7 @@ */ #include "intel_runtime_pm.h" -#include "i915_gem.h" +#include "intel_wakeref.h" static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf) { @@ -17,7 +17,7 @@ static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf) intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref); intel_runtime_pm_put(rpm, wakeref); - GEM_BUG_ON(!wakeref); + INTEL_WAKEREF_BUG_ON(!wakeref); } int __intel_wakeref_get_first(struct intel_runtime_pm *rpm, @@ -48,6 +48,7 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm, atomic_inc(&wf->count); mutex_unlock(&wf->mutex); + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); return 0; } @@ -115,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) if (!refcount_inc_not_zero(&wf->count)) { spin_lock_irqsave(&wf->lock, flags); if (!refcount_inc_not_zero(&wf->count)) { - GEM_BUG_ON(wf->wakeref); + INTEL_WAKEREF_BUG_ON(wf->wakeref); wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm); refcount_set(&wf->count, 1); } @@ -134,5 +135,5 @@ void 
intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf) { intel_wakeref_auto(wf, 0); - GEM_BUG_ON(wf->wakeref); + INTEL_WAKEREF_BUG_ON(wf->wakeref); } diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 9cbb2ebf575b..1d6f5986e4e5 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -13,6 +13,12 @@ #include <linux/stackdepot.h> #include <linux/timer.h> +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) +#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr) +#else +#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + struct intel_runtime_pm; typedef depot_stack_handle_t intel_wakeref_t; @@ -66,6 +72,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm, } /** + * intel_wakeref_get_if_in_use: Acquire the wakeref + * @wf: the wakeref + * + * Acquire a hold on the wakeref, but only if the wakeref is already + * active. + * + * Returns: true if the wakeref was acquired, false otherwise. + */ +static inline bool +intel_wakeref_get_if_active(struct intel_wakeref *wf) +{ + return atomic_inc_not_zero(&wf->count); +} + +/** * intel_wakeref_put: Release the wakeref * @i915: the drm_i915_private device * @wf: the wakeref @@ -86,6 +107,7 @@ intel_wakeref_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)) { + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex)) return __intel_wakeref_put_last(rpm, wf, fn); @@ -121,13 +143,13 @@ intel_wakeref_unlock(struct intel_wakeref *wf) } /** - * intel_wakeref_active: Query whether the wakeref is currently held + * intel_wakeref_is_active: Query whether the wakeref is currently held * @wf: the wakeref * * Returns: true if the wakeref is currently held. 
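 *
 * The return value is only a snapshot; a caller that needs the device to
 * stay awake should take a reference instead. A minimal sketch with
 * placeholder names (rpm, retire_fn and touch_hw are illustrative only):
 *
 *	if (intel_wakeref_get_if_active(wf)) {
 *		touch_hw();
 *		intel_wakeref_put(rpm, wf, retire_fn);
 *	}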
*/ static inline bool -intel_wakeref_active(struct intel_wakeref *wf) +intel_wakeref_is_active(const struct intel_wakeref *wf) { return READ_ONCE(wf->wakeref); } diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 7b4ba84b9fb8..0e86a9e85b49 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -74,7 +74,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - if (!HAS_GUC(i915)) + if (!HAS_GT_UC(i915)) return; if (INTEL_GEN(i915) >= 11) @@ -164,8 +164,8 @@ static inline int check_hw_restriction(struct drm_i915_private *i915, int intel_wopcm_init(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw); - u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw); + u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.guc.fw); + u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.huc.fw); u32 ctx_rsvd = context_reserved_size(i915); u32 guc_wopcm_base; u32 guc_wopcm_size; @@ -177,7 +177,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) GEM_BUG_ON(!wopcm->size); - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -E2BIG; if (guc_fw_size >= wopcm->size) { @@ -225,17 +225,18 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) return 0; } -static inline int write_and_verify(struct drm_i915_private *dev_priv, - i915_reg_t reg, u32 val, u32 mask, - u32 locked_bit) +static int +write_and_verify(struct intel_gt *gt, + i915_reg_t reg, u32 val, u32 mask, u32 locked_bit) { + struct intel_uncore *uncore = gt->uncore; u32 reg_val; GEM_BUG_ON(val & ~mask); - I915_WRITE(reg, val); + intel_uncore_write(uncore, reg, val); - reg_val = I915_READ(reg); + reg_val = intel_uncore_read(uncore, reg); return (reg_val & mask) != (val | locked_bit) ? -EIO : 0; } @@ -243,6 +244,7 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv, /** * intel_wopcm_init_hw() - Setup GuC WOPCM registers. * @wopcm: pointer to intel_wopcm. + * @gt: pointer to the containing GT * * Setup the GuC WOPCM size and offset registers with the calculated values. It * will verify the register values to make sure the registers are locked with @@ -250,29 +252,30 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv, * * Return: 0 on success. -EIO if registers were locked with incorrect values. */ -int intel_wopcm_init_hw(struct intel_wopcm *wopcm) +int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt) { - struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm); + struct drm_i915_private *i915 = wopcm_to_i915(wopcm); + struct intel_uncore *uncore = gt->uncore; u32 huc_agent; u32 mask; int err; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return 0; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GT_UC(i915)); GEM_BUG_ON(!wopcm->guc.size); GEM_BUG_ON(!wopcm->guc.base); - err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size, + err = write_and_verify(gt, GUC_WOPCM_SIZE, wopcm->guc.size, GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED, GUC_WOPCM_SIZE_LOCKED); if (err) goto err_out; - huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0; + huc_agent = USES_HUC(i915) ? 
HUC_LOADING_AGENT_GUC : 0; mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; - err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET, + err = write_and_verify(gt, DMA_GUC_WOPCM_OFFSET, wopcm->guc.base | huc_agent, mask, GUC_WOPCM_OFFSET_VALID); if (err) @@ -283,8 +286,9 @@ int intel_wopcm_init_hw(struct intel_wopcm *wopcm) err_out: DRM_ERROR("Failed to init WOPCM registers:\n"); DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n", - I915_READ(DMA_GUC_WOPCM_OFFSET)); - DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE)); + intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET)); + DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", + intel_uncore_read(uncore, GUC_WOPCM_SIZE)); return err; } diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h index 114401971520..56aaed4d64ff 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.h +++ b/drivers/gpu/drm/i915/intel_wopcm.h @@ -9,6 +9,8 @@ #include <linux/types.h> +struct intel_gt; + /** * struct intel_wopcm - Overall WOPCM info and WOPCM regions. * @size: Size of overall WOPCM. @@ -41,6 +43,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm) void intel_wopcm_init_early(struct intel_wopcm *wopcm); int intel_wopcm_init(struct intel_wopcm *wopcm); -int intel_wopcm_init_hw(struct intel_wopcm *wopcm); +int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt); #endif diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/Makefile diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c index 4acdb94555b7..4acdb94555b7 100644 --- a/drivers/gpu/drm/i915/i915_oa_bdw.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h index 0e667f1a8aa1..b5ed68882588 100644 --- a/drivers/gpu/drm/i915/i915_oa_bdw.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_BDW_H__ #define __I915_OA_BDW_H__ -extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c index a44195c39923..a44195c39923 100644 --- a/drivers/gpu/drm/i915/i915_oa_bxt.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h index 679e92cf4f1d..43c3e4ab030a 100644 --- a/drivers/gpu/drm/i915/i915_oa_bxt.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_BXT_H__ #define __I915_OA_BXT_H__ -extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c index 7f60d51b8761..7f60d51b8761 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h index 4d6025559bbe..1b4b563bc585 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CFLGT2_H__ #define __I915_OA_CFLGT2_H__ -extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_cflgt2(struct drm_i915_private 
*dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c index a92c38e3a0ce..a92c38e3a0ce 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h index 0697f4077402..500565e055cd 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CFLGT3_H__ #define __I915_OA_CFLGT3_H__ -extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c index 71ec889a0114..71ec889a0114 100644 --- a/drivers/gpu/drm/i915/i915_oa_chv.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.c diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h index 0986eae3135f..ad85d6a6a573 100644 --- a/drivers/gpu/drm/i915/i915_oa_chv.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CHV_H__ #define __I915_OA_CHV_H__ -extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c index 5c23d883d6c9..5c23d883d6c9 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h index e830a406aff2..9faaca38b587 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CNL_H__ #define __I915_OA_CNL_H__ -extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c index 4bdda66df7d2..4bdda66df7d2 100644 --- a/drivers/gpu/drm/i915/i915_oa_glk.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.c diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h index 06dedf991edb..cc13a1e9fd3e 100644 --- a/drivers/gpu/drm/i915/i915_oa_glk.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_GLK_H__ #define __I915_OA_GLK_H__ -extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c index cc6526fdd2bd..cc6526fdd2bd 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h index 3d0c870cd0bd..f0ddcc79c761 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_HSW_H__ #define __I915_OA_HSW_H__ -extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c index baa51427a543..baa51427a543 100644 --- a/drivers/gpu/drm/i915/i915_oa_icl.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.c diff 
--git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h index 24eaa97d61ba..e501651d385b 100644 --- a/drivers/gpu/drm/i915/i915_oa_icl.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_ICL_H__ #define __I915_OA_ICL_H__ -extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c index 168e49ab0d4d..168e49ab0d4d 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h index a55398a904de..dc460e6e0fae 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_KBLGT2_H__ #define __I915_OA_KBLGT2_H__ -extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c index 6ffa553c388e..6ffa553c388e 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h index 3ddd3483b7cc..5926992b735a 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_KBLGT3_H__ #define __I915_OA_KBLGT3_H__ -extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c index 7ce6ee851d43..7ce6ee851d43 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h index be6256037239..353db35b36c1 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_SKLGT2_H__ #define __I915_OA_SKLGT2_H__ -extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c index 086ca2631e1c..086ca2631e1c 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h index 650beb068e56..52f94c674b62 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_SKLGT3_H__ #define __I915_OA_SKLGT3_H__ -extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c index b291a6eb8a87..b291a6eb8a87 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h index 
8dcf849d131e..8e364820cc63 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_SKLGT4_H__ #define __I915_OA_SKLGT4_H__ -extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index c0b3537a5fa6..e5cd5d47e380 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -4,7 +4,10 @@ * Copyright © 2018 Intel Corporation */ +#include <linux/kref.h> + #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" @@ -13,37 +16,86 @@ struct live_active { struct i915_active base; + struct kref ref; bool retired; }; -static void __live_active_retire(struct i915_active *base) +static void __live_get(struct live_active *active) +{ + kref_get(&active->ref); +} + +static void __live_free(struct live_active *active) +{ + i915_active_fini(&active->base); + kfree(active); +} + +static void __live_release(struct kref *ref) +{ + struct live_active *active = container_of(ref, typeof(*active), ref); + + __live_free(active); +} + +static void __live_put(struct live_active *active) +{ + kref_put(&active->ref, __live_release); +} + +static int __live_active(struct i915_active *base) +{ + struct live_active *active = container_of(base, typeof(*active), base); + + __live_get(active); + return 0; +} + +static void __live_retire(struct i915_active *base) { struct live_active *active = container_of(base, typeof(*active), base); active->retired = true; + __live_put(active); +} + +static struct live_active *__live_alloc(struct drm_i915_private *i915) +{ + struct live_active *active; + + active = kzalloc(sizeof(*active), GFP_KERNEL); + if (!active) + return NULL; + + kref_init(&active->ref); + i915_active_init(i915, &active->base, __live_active, __live_retire); + + return active; } -static int __live_active_setup(struct drm_i915_private *i915, - struct live_active *active) +static struct live_active * +__live_active_setup(struct drm_i915_private *i915) { struct intel_engine_cs *engine; struct i915_sw_fence *submit; + struct live_active *active; enum intel_engine_id id; unsigned int count = 0; int err = 0; - submit = heap_fence_create(GFP_KERNEL); - if (!submit) - return -ENOMEM; + active = __live_alloc(i915); + if (!active) + return ERR_PTR(-ENOMEM); - i915_active_init(i915, &active->base, __live_active_retire); - active->retired = false; + submit = heap_fence_create(GFP_KERNEL); + if (!submit) { + kfree(active); + return ERR_PTR(-ENOMEM); + } - if (!i915_active_acquire(&active->base)) { - pr_err("First i915_active_acquire should report being idle\n"); - err = -EINVAL; + err = i915_active_acquire(&active->base); + if (err) goto out; - } for_each_engine(engine, i915, id) { struct i915_request *rq; @@ -74,74 +126,92 @@ static int __live_active_setup(struct drm_i915_private *i915, pr_err("i915_active retired before submission!\n"); err = -EINVAL; } - if (active->base.count != count) { + if (atomic_read(&active->base.count) != count) { pr_err("i915_active not tracking all requests, found %d, expected %d\n", - active->base.count, count); + atomic_read(&active->base.count), count); err = -EINVAL; } out: i915_sw_fence_commit(submit); heap_fence_put(submit); + if (err) { + __live_put(active); + active = ERR_PTR(err); + } - return err; + return active; } static int 
live_active_wait(void *arg) { struct drm_i915_private *i915 = arg; - struct live_active active; + struct live_active *active; intel_wakeref_t wakeref; - int err; + int err = 0; /* Check that we get a callback when requests retire upon waiting */ mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - err = __live_active_setup(i915, &active); + active = __live_active_setup(i915); + if (IS_ERR(active)) { + err = PTR_ERR(active); + goto err; + } - i915_active_wait(&active.base); - if (!active.retired) { + i915_active_wait(&active->base); + if (!active->retired) { pr_err("i915_active not retired after waiting!\n"); err = -EINVAL; } - i915_active_fini(&active.base); + __live_put(active); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; +err: intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); + return err; } static int live_active_retire(void *arg) { struct drm_i915_private *i915 = arg; - struct live_active active; + struct live_active *active; intel_wakeref_t wakeref; - int err; + int err = 0; /* Check that we get a callback when requests are indirectly retired */ mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - err = __live_active_setup(i915, &active); + active = __live_active_setup(i915); + if (IS_ERR(active)) { + err = PTR_ERR(active); + goto err; + } /* waits for & retires all requests */ if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - if (!active.retired) { + if (!active->retired) { pr_err("i915_active not retired after flushing!\n"); err = -EINVAL; } - i915_active_fini(&active.base); + __live_put(active); + +err: intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); + return err; } @@ -152,7 +222,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915) SUBTEST(live_active_retire), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index c6a01a6e87f1..bb6dd54a6ff3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -8,6 +8,7 @@ #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" @@ -115,7 +116,7 @@ static void pm_resume(struct drm_i915_private *i915) * that runtime-pm just works. 
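	 * The block below therefore takes a temporary runtime-pm reference
	 * and performs the minimal resume dance: sanitize the GT, sanitize
	 * GEM state, then resume GEM.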
*/ with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - intel_gt_sanitize(i915, false); + intel_gt_sanitize(&i915->gt, false); i915_gem_sanitize(i915); i915_gem_resume(i915); } @@ -154,8 +155,6 @@ static int igt_gem_suspend(void *arg) mutex_lock(&i915->drm.struct_mutex); err = switch_to_context(i915, ctx); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; mutex_unlock(&i915->drm.struct_mutex); out: mock_file_free(i915, file); @@ -195,8 +194,6 @@ static int igt_gem_hibernate(void *arg) mutex_lock(&i915->drm.struct_mutex); err = switch_to_context(i915, ctx); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; mutex_unlock(&i915->drm.struct_mutex); out: mock_file_free(i915, file); @@ -210,8 +207,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_gem_hibernate), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_subtests(tests, i915); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index a3cb0aade6f1..b6449d0a8c17 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -25,6 +25,7 @@ #include "gem/i915_gem_pm.h" #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" @@ -557,7 +558,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_evict_contexts), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 1a60b9fe8221..31a51ca1ddcb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -208,9 +208,7 @@ static int igt_ppgtt_alloc(void *arg) } err_ppgtt_cleanup: - mutex_lock(&dev_priv->drm.struct_mutex); i915_vm_put(&ppgtt->vm); - mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -1195,7 +1193,7 @@ static int igt_ggtt_page(void *arg) iowrite32(n, vaddr + n); io_mapping_unmap_atomic(vaddr); } - i915_gem_flush_ggtt_writes(i915); + intel_gt_flush_ggtt_writes(ggtt->vm.gt); i915_random_reorder(order, count, &prng); for (n = 0; n < count; n++) { diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index d5dc4427d664..2b31a4ee0b4c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -12,7 +12,7 @@ selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */ selftest(uncore, intel_uncore_live_selftests) selftest(workarounds, intel_workarounds_live_selftests) -selftest(timelines, i915_timeline_live_selftests) +selftest(timelines, intel_timeline_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 510eb176bb2c..b55da4d9ccba 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -15,7 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) 
selftest(engine, intel_engine_cs_mock_selftests) -selftest(timelines, i915_timeline_mock_selftests) +selftest(timelines, intel_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) selftest(objects, i915_gem_object_mock_selftests) selftest(phys, i915_gem_phys_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 298bb7116c51..86c299663934 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -27,6 +27,8 @@ #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_gt.h" + #include "i915_random.h" #include "i915_selftest.h" #include "igt_live_test.h" @@ -73,55 +75,58 @@ static int igt_wait_request(void *arg) err = -ENOMEM; goto out_unlock; } + i915_request_get(request); if (i915_request_wait(request, 0, 0) != -ETIME) { pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T) != -ETIME) { pr_err("request wait succeeded (expected timeout before submit!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_completed(request)) { pr_err("request completed before submit!!\n"); - goto out_unlock; + goto out_request; } i915_request_add(request); if (i915_request_wait(request, 0, 0) != -ETIME) { pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_completed(request)) { pr_err("request completed immediately!\n"); - goto out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T / 2) != -ETIME) { pr_err("request wait succeeded (expected timeout!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T) == -ETIME) { pr_err("request wait timed out!\n"); - goto out_unlock; + goto out_request; } if (!i915_request_completed(request)) { pr_err("request not complete after waiting!\n"); - goto out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T) == -ETIME) { pr_err("request wait timed out when already complete!\n"); - goto out_unlock; + goto out_request; } err = 0; +out_request: + i915_request_put(request); out_unlock: mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); @@ -366,14 +371,16 @@ static int __igt_breadcrumbs_smoketest(void *arg) if (!wait_event_timeout(wait->wait, i915_sw_fence_done(wait), - HZ / 2)) { + 5 * HZ)) { struct i915_request *rq = requests[count - 1]; - pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n", - count, + pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n", + atomic_read(&wait->pending), count, rq->fence.context, rq->fence.seqno, t->engine->name); - i915_gem_set_wedged(t->engine->i915); + GEM_TRACE_DUMP(); + + intel_gt_set_wedged(t->engine->gt); GEM_BUG_ON(!i915_request_completed(rq)); i915_sw_fence_wait(wait); err = -EIO; @@ -622,7 +629,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(&i915->gt); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { @@ -791,7 +798,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(&i915->gt); return vma; @@ -809,7 +816,7 @@ static int 
recursive_batch_resolve(struct i915_vma *batch) return PTR_ERR(cmd); *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(batch->vm->i915); + intel_gt_chipset_flush(batch->vm->gt); i915_gem_object_unpin_map(batch->obj); @@ -1031,7 +1038,7 @@ out_request: I915_MAP_WC); if (!IS_ERR(cmd)) { *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(engine->gt); i915_gem_object_unpin_map(request[id]->batch->obj); } @@ -1227,7 +1234,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_breadcrumbs_smoketest), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index b18eaefef798..db9c645bbdfe 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -26,6 +26,8 @@ #include "../i915_drv.h" #include "../i915_selftest.h" +#include "igt_flush_test.h" + struct i915_selftest i915_selftest __read_mostly = { .timeout_ms = 1000, }; @@ -240,7 +242,61 @@ static bool apply_subtest_filter(const char *caller, const char *name) return result; } +int __i915_nop_setup(void *data) +{ + return 0; +} + +int __i915_nop_teardown(int err, void *data) +{ + return err; +} + +int __i915_live_setup(void *data) +{ + struct drm_i915_private *i915 = data; + + return intel_gt_terminally_wedged(&i915->gt); +} + +int __i915_live_teardown(int err, void *data) +{ + struct drm_i915_private *i915 = data; + + mutex_lock(&i915->drm.struct_mutex); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + i915_gem_drain_freed_objects(i915); + + return err; +} + +int __intel_gt_live_setup(void *data) +{ + struct intel_gt *gt = data; + + return intel_gt_terminally_wedged(gt); +} + +int __intel_gt_live_teardown(int err, void *data) +{ + struct intel_gt *gt = data; + + mutex_lock(>->i915->drm.struct_mutex); + if (igt_flush_test(gt->i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(>->i915->drm.struct_mutex); + + i915_gem_drain_freed_objects(gt->i915); + + return err; +} + int __i915_subtests(const char *caller, + int (*setup)(void *data), + int (*teardown)(int err, void *data), const struct i915_subtest *st, unsigned int count, void *data) @@ -255,10 +311,17 @@ int __i915_subtests(const char *caller, if (!apply_subtest_filter(caller, st->name)) continue; + err = setup(data); + if (err) { + pr_err(DRIVER_NAME "/%s: setup failed for %s\n", + caller, st->name); + return err; + } + pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name); GEM_TRACE("Running %s/%s\n", caller, st->name); - err = st->func(data); + err = teardown(st->func(data), data); if (err && err != -EINTR) { pr_err(DRIVER_NAME "/%s: %s failed with error %d\n", caller, st->name, err); diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index fbc79b14823a..a5bec0a4cdcc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -193,6 +193,8 @@ static int igt_vma_create(void *arg) list_del_init(&ctx->link); mock_context_close(ctx); } + + cond_resched(); } end: @@ -341,6 +343,8 @@ static int igt_vma_pin1(void *arg) goto out; } } + + cond_resched(); } err = 0; @@ -597,6 +601,8 @@ static int igt_vma_rotate_remap(void *arg) } i915_vma_unpin(vma); + + cond_resched(); } } } @@ -752,6 +758,8 @@ static int igt_vma_partial(void *arg) i915_vma_unpin(vma); 
nvma++; + + cond_resched(); } } @@ -961,6 +969,8 @@ static int igt_vma_remapped_gtt(void *arg) } } i915_vma_unpin_iomap(vma); + + cond_resched(); } } diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index 5bfd1b2626a2..d3b5eb402d33 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -5,6 +5,7 @@ */ #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_selftest.h" @@ -13,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) { - int ret = i915_terminally_wedged(i915) ? -EIO : 0; + int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0; int repeat = !!(flags & I915_WAIT_LOCKED); cond_resched(); @@ -27,7 +28,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) __builtin_return_address(0)); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); repeat = 0; ret = -EIO; } diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c index 587df6fd4ffe..7ec8f8b049c6 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.c +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -7,47 +7,45 @@ #include "igt_reset.h" #include "gt/intel_engine.h" +#include "gt/intel_gt.h" #include "../i915_drv.h" -void igt_global_reset_lock(struct drm_i915_private *i915) +void igt_global_reset_lock(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - pr_debug("%s: current gpu_error=%08lx\n", - __func__, i915->gpu_error.flags); + pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags); - while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) - wait_event(i915->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, - &i915->gpu_error.flags)); + while (test_and_set_bit(I915_RESET_BACKOFF, >->reset.flags)) + wait_event(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, >->reset.flags)); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { while (test_and_set_bit(I915_RESET_ENGINE + id, - &i915->gpu_error.flags)) - wait_on_bit(&i915->gpu_error.flags, - I915_RESET_ENGINE + id, + >->reset.flags)) + wait_on_bit(>->reset.flags, I915_RESET_ENGINE + id, TASK_UNINTERRUPTIBLE); } } -void igt_global_reset_unlock(struct drm_i915_private *i915) +void igt_global_reset_unlock(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + for_each_engine(engine, gt->i915, id) + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); - wake_up_all(&i915->gpu_error.reset_queue); + clear_bit(I915_RESET_BACKOFF, >->reset.flags); + wake_up_all(>->reset.queue); } -bool igt_force_reset(struct drm_i915_private *i915) +bool igt_force_reset(struct intel_gt *gt) { - i915_gem_set_wedged(i915); - i915_reset(i915, 0, NULL); + intel_gt_set_wedged(gt); + intel_gt_reset(gt, 0, NULL); - return !i915_reset_failed(i915); + return !intel_gt_is_wedged(gt); } diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h index 363bd853e50f..851873b67ab3 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.h +++ b/drivers/gpu/drm/i915/selftests/igt_reset.h @@ -7,10 +7,12 @@ #ifndef __I915_SELFTESTS_IGT_RESET_H__ #define __I915_SELFTESTS_IGT_RESET_H__ -#include "../i915_drv.h" +#include <linux/types.h> -void 
igt_global_reset_lock(struct drm_i915_private *i915); -void igt_global_reset_unlock(struct drm_i915_private *i915); -bool igt_force_reset(struct drm_i915_private *i915); +struct intel_gt; + +void igt_global_reset_lock(struct intel_gt *gt); +void igt_global_reset_unlock(struct intel_gt *gt); +bool igt_force_reset(struct intel_gt *gt); #endif diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 1e59b543cf27..89b6552a6497 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -3,6 +3,7 @@ * * Copyright © 2018 Intel Corporation */ +#include "gt/intel_gt.h" #include "gem/selftests/igt_gem_utils.h" @@ -18,6 +19,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) memset(spin, 0, sizeof(*spin)); spin->i915 = i915; + spin->gt = &i915->gt; spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(spin->hws)) { @@ -94,6 +96,8 @@ igt_spinner_create_request(struct igt_spinner *spin, u32 *batch; int err; + spin->gt = engine->gt; + vma = i915_vma_instance(spin->obj, ctx->vm, NULL); if (IS_ERR(vma)) return ERR_CAST(vma); @@ -138,7 +142,7 @@ igt_spinner_create_request(struct igt_spinner *spin, *batch++ = upper_32_bits(vma->node.start); *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - i915_gem_chipset_flush(spin->i915); + intel_gt_chipset_flush(engine->gt); if (engine->emit_init_breadcrumb && rq->timeline->has_initial_breadcrumb) { @@ -172,7 +176,7 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) void igt_spinner_end(struct igt_spinner *spin) { *spin->batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(spin->i915); + intel_gt_chipset_flush(spin->gt); } void igt_spinner_fini(struct igt_spinner *spin) diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h index 34a88ac9b47a..1bfc39efa773 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.h +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -14,8 +14,11 @@ #include "i915_request.h" #include "i915_selftest.h" +struct intel_gt; + struct igt_spinner { struct drm_i915_private *i915; + struct intel_gt *gt; struct drm_i915_gem_object *hws; struct drm_i915_gem_object *obj; u32 *batch; diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h deleted file mode 100644 index 08e5ff11bbd9..000000000000 --- a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#ifndef IGT_WEDGE_ME_H -#define IGT_WEDGE_ME_H - -#include <linux/workqueue.h> - -#include "../i915_gem.h" - -struct drm_i915_private; - -struct igt_wedge_me { - struct delayed_work work; - struct drm_i915_private *i915; - const char *name; -}; - -static void __igt_wedge_me(struct work_struct *work) -{ - struct igt_wedge_me *w = container_of(work, typeof(*w), work.work); - - pr_err("%s timed out, cancelling test.\n", w->name); - - GEM_TRACE("%s timed out.\n", w->name); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(w->i915); -} - -static void __igt_init_wedge(struct igt_wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name) -{ - w->i915 = i915; - w->name = name; - - INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me); - schedule_delayed_work(&w->work, timeout); -} - -static void __igt_fini_wedge(struct igt_wedge_me *w) -{ - cancel_delayed_work_sync(&w->work); - 
destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; -} - -#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \ - (W)->i915; \ - __igt_fini_wedge((W))) - -#endif /* IGT_WEDGE_ME_H */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 64bc51400ae7..fd4cc4809eb8 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -25,6 +25,7 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include "gt/intel_gt.h" #include "gt/mock_engine.h" #include "mock_request.h" @@ -67,7 +68,7 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_fini(i915); mutex_unlock(&i915->drm.struct_mutex); - i915_timelines_fini(i915); + intel_timelines_fini(i915); drain_workqueue(i915->wq); i915_gem_drain_freed_objects(i915); @@ -179,14 +180,9 @@ struct drm_i915_private *mock_gem_device(void) mock_uncore_init(&i915->uncore); i915_gem_init__mm(i915); - intel_gt_pm_init(i915); + intel_gt_init_early(&i915->gt, i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ - init_waitqueue_head(&i915->gpu_error.wait_queue); - init_waitqueue_head(&i915->gpu_error.reset_queue); - init_srcu_struct(&i915->gpu_error.reset_backoff_srcu); - mutex_init(&i915->gpu_error.wedge_mutex); - i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) goto err_drv; @@ -198,11 +194,7 @@ struct drm_i915_private *mock_gem_device(void) i915->gt.awake = true; - i915_timelines_init(i915); - - INIT_LIST_HEAD(&i915->gt.active_rings); - INIT_LIST_HEAD(&i915->gt.closed_vma); - spin_lock_init(&i915->gt.closed_lock); + intel_timelines_init(i915); mutex_lock(&i915->drm.struct_mutex); @@ -233,7 +225,7 @@ err_engine: mock_engine_free(i915->engine[RCS0]); err_unlock: mutex_unlock(&i915->drm.struct_mutex); - i915_timelines_fini(i915); + intel_timelines_fini(i915); destroy_workqueue(i915->wq); err_drv: drm_mode_config_cleanup(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index f625c307a406..e62a67e0f79c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -98,6 +98,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) { memset(ggtt, 0, sizeof(*ggtt)); + ggtt->vm.gt = &i915->gt; ggtt->vm.i915 = i915; ggtt->vm.is_ggtt = true; @@ -116,6 +117,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->vm.vma_ops.clear_pages = clear_pages; i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); + + intel_gt_init_hw(i915); } void mock_fini_ggtt(struct i915_ggtt *ggtt) diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c index ff8999c63a12..49585f16d4a2 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.c +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c @@ -41,6 +41,6 @@ __nop_read(64) void mock_uncore_init(struct intel_uncore *uncore) { - ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop); - ASSIGN_READ_MMIO_VFUNCS(uncore, nop); + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop); } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 1671db47aa57..e9c55d1d6c04 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit 
*submit case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: /* copy commands into RB: */ obj = submit->bos[submit->cmd[i].idx].obj; @@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index be39cf01e51e..dc8ec2c94301 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 9acbbc0f3232..048c8be426f3 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* ignore if there has not been a ctx switch: */ if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e2f74163a16..0aa8a12c9952 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2221,8 +2221,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, if (ret) goto fail; - spin_lock_init(&dpu_enc->enc_spinlock); - atomic_set(&dpu_enc->frame_done_timeout_ms, 0); timer_setup(&dpu_enc->frame_done_timer, dpu_encoder_frame_done_timeout, 0); @@ -2276,6 +2274,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); + spin_lock_init(&dpu_enc->enc_spinlock); dpu_enc->enabled = false; return &dpu_enc->base; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index ff14555372d0..78d5fa230c16 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, mdp5_crtc->enabled = false; } +static void mdp5_crtc_vblank_on(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_interface *intf = mdp5_cstate->pipeline.intf; + u32 count; + + count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 
0 : 0xffffffff; + drm_crtc_set_max_vblank_count(crtc, count); + + drm_crtc_vblank_on(crtc); +} + static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, } /* Restore vblank irq handling after power is enabled */ - drm_crtc_vblank_on(crtc); + mdp5_crtc_vblank_on(crtc); mdp5_crtc_mode_set_nofb(crtc); @@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) mdp5_crtc_destroy_state(crtc, crtc->state); __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); + + drm_crtc_vblank_reset(crtc); } static const struct drm_crtc_funcs mdp5_crtc_funcs = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 4a60f5fca6b0..fec6ef1ae3b9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; dev->driver->get_scanout_position = mdp5_get_scanoutpos; dev->driver->get_vblank_counter = mdp5_get_vblank_counter; - dev->max_vblank_count = 0xffffffff; + dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */ dev->vblank_disable_immediate = true; return kms; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index ea335ca25eca..ee031c086805 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -619,7 +619,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file) msm_submitqueue_init(dev, ctx); - ctx->aspace = priv->gpu->aspace; + ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL; file->driver_priv = ctx; return 0; @@ -1276,7 +1276,8 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - drm_of_component_match_add(dev, matchptr, compare_of, np); + if (of_device_is_available(np)) + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 18da8d6ffc51..8cc70026c358 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj) return !msm_obj->vram_node; } +/* + * Cache sync.. this is a bit over-complicated, to fit dma-mapping + * API. Really GPU cache is out of scope here (handled on cmdstream) + * and all we need to do is invalidate newly allocated pages before + * mapping to CPU as uncached/writecombine. + * + * On top of this, we have the added headache, that depending on + * display generation, the display's iommu may be wired up to either + * the toplevel drm device (mdss), or to the mdp sub-node, meaning + * that here we either have dma-direct or iommu ops. + * + * Let this be a cautionary tail of abstraction gone wrong. 
+ */ + +static void sync_for_device(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_map_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + +static void sync_for_cpu(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + /* allocate pages from VRAM carveout, used when no IOMMU: */ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) { @@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj) * because display controller, GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_map_sg(dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + sync_for_device(msm_obj); } return msm_obj->pages; @@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj) * GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, - DMA_BIDIRECTIONAL); + sync_for_cpu(msm_obj); sg_free_table(msm_obj->sgt); kfree(msm_obj->sgt); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8497768f1b41..126703816794 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, connector->display_info.bpc * 3); - if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (crtc_state->mode_changed) { slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn); diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 8c92374afcf2..a835cebb6d90 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm, fault->inst, fault->addr, fault->access); } +static inline bool +nouveau_range_done(struct hmm_range *range) +{ + bool ret = hmm_range_valid(range); + + hmm_range_unregister(range); + return ret; +} + +static int +nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) +{ + long ret; + + range->default_flags = 0; + range->pfn_flags_mask = -1UL; + + ret = hmm_range_register(range, mirror, + range->start, range->end, + PAGE_SHIFT); + if (ret) { + up_read(&range->vma->vm_mm->mmap_sem); + return (int)ret; + } + + if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { + up_read(&range->vma->vm_mm->mmap_sem); + return -EAGAIN; + } + + ret = hmm_range_fault(range, true); + if (ret <= 0) { + if (ret == 0) + ret = -EBUSY; + up_read(&range->vma->vm_mm->mmap_sem); + hmm_range_unregister(range); + return ret; + } + return 0; +} + static int nouveau_svm_fault(struct nvif_notify *notify) { @@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify) range.values = nouveau_svm_pfn_values; range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; again: - ret = hmm_vma_fault(&svmm->mirror, &range, true); + ret = nouveau_range_fault(&svmm->mirror, &range); if (ret == 0) { mutex_lock(&svmm->mutex); - if 
(!hmm_vma_range_done(&range)) { + if (!nouveau_range_done(&range)) { mutex_unlock(&svmm->mutex); goto again; } @@ -666,8 +707,8 @@ again: NULL); svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); + up_read(&svmm->mm->mmap_sem); } - up_read(&svmm->mm->mmap_sem); /* Cancel any faults in the window whose pages didn't manage * to keep their valid bit, or stay writeable when required. diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index d594f7520b7b..7d78e6deac89 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool, static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) { + unsigned long attrs = 0; dma_addr_t dma = d_page->dma; d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; - dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma); + if (pool->type & IS_HUGE) + attrs = DMA_ATTR_NO_WARN; + + dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); kfree(d_page); d_page = NULL; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index e7dff5febe16..d42bc0883a32 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 }; static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 }; static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 }; static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a }; -static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c }; +static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b }; static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c }; static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d }; @@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME; data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME; data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME; + data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H; data->REG_PWM[0] = NCT6106_REG_PWM; data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT; data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT; diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index a7d2b16dd702..30e18eb60da7 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -408,8 +408,10 @@ static ssize_t occ_show_power_1(struct device *dev, static u64 occ_get_powr_avg(u64 *accum, u32 *samples) { - return div64_u64(get_unaligned_be64(accum) * 1000000ULL, - get_unaligned_be32(samples)); + u64 divisor = get_unaligned_be32(samples); + + return (divisor == 0) ? 
0 : + div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor); } static ssize_t occ_show_power_2(struct device *dev, diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index 8d55cdd69ff4..435c7d7377a3 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = { static struct at91_twi_pdata sama5d2_config = { .clk_max_div = 7, - .clk_offset = 4, + .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index e87232f2e708..a3fcc35ffd3b 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR); /* send stop when last byte has been written */ - if (--dev->buf_len == 0) + if (--dev->buf_len == 0) { if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); + at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY); + } dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); @@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) } else { at91_twi_write_next_byte(dev); at91_twi_write(dev, AT91_TWI_IER, - AT91_TWI_TXCOMP | - AT91_TWI_NACK | - AT91_TWI_TXRDY); + AT91_TWI_TXCOMP | AT91_TWI_NACK | + (dev->buf_len ? AT91_TWI_TXRDY : 0)); } } diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 2c7f145a036e..d7fd76baec92 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -392,16 +392,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c) { struct i2c_msg *msg = iproc_i2c->msg; + uint32_t val; /* Read valid data from RX FIFO */ while (iproc_i2c->rx_bytes < msg->len) { - if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT) - & M_FIFO_RX_CNT_MASK)) + val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET); + + /* rx fifo empty */ + if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK)) break; msg->buf[iproc_i2c->rx_bytes] = - (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >> - M_RX_DATA_SHIFT) & M_RX_DATA_MASK; + (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK; iproc_i2c->rx_bytes++; } } diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index cfc76b5de726..5a1235fd86bb 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -364,7 +364,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev) /* * We need gpu_i2c_suspend() even if it is stub, for runtime pm to work * correctly. Without it, lspci shows runtime pm status as "D0" for the card. - * Documentation/power/pci.txt also insists for driver to provide this. + * Documentation/power/pci.rst also insists for driver to provide this. 
*/ static __maybe_unused int gpu_i2c_suspend(struct device *dev) { diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index d97fb857b0ea..c98ef4c4a0c9 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -435,6 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) * fall through to the write state, as we will need to * send a byte as well */ + /* Fall through */ case STATE_WRITE: /* diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 888d89ce81df..beee7b7e0d9a 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, struct ib_udata *udata, struct ib_uobject *uobj) { + enum ib_qp_type qp_type = attr->qp_type; struct ib_qp *qp; + bool is_xrc; if (!dev->ops.create_qp) return ERR_PTR(-EOPNOTSUPP); @@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, * and more importantly they are created internaly by driver, * see mlx5 create_dev_resources() as an example. */ - if (attr->qp_type < IB_QPT_XRC_INI) { + is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT; + if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) { qp->res.type = RDMA_RESTRACK_QP; if (uobj) rdma_restrack_uadd(&qp->res); diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 01faef7bc061..45d5164e9574 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index) u64 sum; port_counter = &dev->port_data[port].port_counter; + if (!port_counter->hstats) + return 0; + sum = get_running_counters_hwstat_sum(dev, port, index); sum += port_counter->hstats->value[index]; @@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev) struct rdma_port_counter *port_counter; u32 port; - if (!dev->ops.alloc_hw_stats || !dev->port_data) + if (!dev->port_data) return; rdma_for_each_port(dev, port) { @@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev) port_counter->mode.mode = RDMA_COUNTER_MODE_NONE; mutex_init(&port_counter->lock); + if (!dev->ops.alloc_hw_stats) + continue; + port_counter->hstats = dev->ops.alloc_hw_stats(dev, port); if (!port_counter->hstats) goto fail; @@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev) struct rdma_port_counter *port_counter; u32 port; - if (!dev->ops.alloc_hw_stats) - return; - rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; kfree(port_counter->hstats); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 9773145dee09..ea8661a00651 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); static DECLARE_RWSEM(devices_rwsem); #define DEVICE_REGISTERED XA_MARK_1 -static LIST_HEAD(client_list); +static u32 highest_client_id; #define CLIENT_REGISTERED XA_MARK_1 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); static DECLARE_RWSEM(clients_rwsem); +static void ib_client_put(struct ib_client *client) +{ + if (refcount_dec_and_test(&client->uses)) + complete(&client->uses_zero); +} + /* * If client_data is registered then the corresponding client must also still * be registered. 
@@ -661,6 +667,14 @@ static int add_client_context(struct ib_device *device, down_write(&device->client_data_rwsem); /* + * So long as the client is registered hold both the client and device + * unregistration locks. + */ + if (!refcount_inc_not_zero(&client->uses)) + goto out_unlock; + refcount_inc(&device->refcount); + + /* * Another caller to add_client_context got here first and has already * completely initialized context. */ @@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device, return 0; out: + ib_device_put(device); + ib_client_put(client); +out_unlock: up_write(&device->client_data_rwsem); return ret; } @@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device, client_data = xa_load(&device->client_data, client_id); xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); client = xa_load(&clients, client_id); - downgrade_write(&device->client_data_rwsem); + up_write(&device->client_data_rwsem); /* * Notice we cannot be holding any exclusive locks when calling the @@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device, * * For this reason clients and drivers should not call the * unregistration functions will holdling any locks. - * - * It tempting to drop the client_data_rwsem too, but this is required - * to ensure that unregister_client does not return until all clients - * are completely unregistered, which is required to avoid module - * unloading races. */ if (client->remove) client->remove(device, client_data); xa_erase(&device->client_data, client_id); - up_read(&device->client_data_rwsem); + ib_device_put(device); + ib_client_put(client); } static int alloc_port_data(struct ib_device *device) @@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device) static void disable_device(struct ib_device *device) { - struct ib_client *client; + u32 cid; WARN_ON(!refcount_read(&device->refcount)); @@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device) xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); up_write(&devices_rwsem); + /* + * Remove clients in LIFO order, see assign_client_id. This could be + * more efficient if xarray learns to reverse iterate. Since no new + * clients can be added to this ib_device past this point we only need + * the maximum possible client_id value here. + */ down_read(&clients_rwsem); - list_for_each_entry_reverse(client, &client_list, list) - remove_client_context(device, client->client_id); + cid = highest_client_id; up_read(&clients_rwsem); + while (cid) { + cid--; + remove_client_context(device, cid); + } /* Pairs with refcount_set in enable_device */ ib_device_put(device); @@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client) /* * The add/remove callbacks must be called in FIFO/LIFO order. To * achieve this we assign client_ids so they are sorted in - * registration order, and retain a linked list we can reverse iterate - * to get the LIFO order. The extra linked list can go away if xarray - * learns to reverse iterate. + * registration order. 
*/ - if (list_empty(&client_list)) { - client->client_id = 0; - } else { - struct ib_client *last; - - last = list_last_entry(&client_list, struct ib_client, list); - client->client_id = last->client_id + 1; - } + client->client_id = highest_client_id; ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); if (ret) goto out; + highest_client_id++; xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); - list_add_tail(&client->list, &client_list); out: up_write(&clients_rwsem); return ret; } +static void remove_client_id(struct ib_client *client) +{ + down_write(&clients_rwsem); + xa_erase(&clients, client->client_id); + for (; highest_client_id; highest_client_id--) + if (xa_load(&clients, highest_client_id - 1)) + break; + up_write(&clients_rwsem); +} + /** * ib_register_client - Register an IB client * @client:Client to register @@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client) unsigned long index; int ret; + refcount_set(&client->uses, 1); + init_completion(&client->uses_zero); ret = assign_client_id(client); if (ret) return ret; @@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client) unsigned long index; down_write(&clients_rwsem); + ib_client_put(client); xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); up_write(&clients_rwsem); - /* - * Every device still known must be serialized to make sure we are - * done with the client callbacks before we return. - */ - down_read(&devices_rwsem); - xa_for_each (&devices, index, device) + + /* We do not want to have locks while calling client->remove() */ + rcu_read_lock(); + xa_for_each (&devices, index, device) { + if (!ib_device_try_get(device)) + continue; + rcu_read_unlock(); + remove_client_context(device, client->client_id); - up_read(&devices_rwsem); - down_write(&clients_rwsem); - list_del(&client->list); - xa_erase(&clients, client->client_id); - up_write(&clients_rwsem); + ib_device_put(device); + rcu_read_lock(); + } + rcu_read_unlock(); + + /* + * remove_client_context() is not a fence, it can return even though a + * removal is ongoing. Wait until all removals are completed. 
+ */ + wait_for_completion(&client->uses_zero); + remove_client_id(client); } EXPORT_SYMBOL(ib_unregister_client); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index cc99479b2c09..9947d16edef2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device, if (has_smi) cq_size *= 2; + port_priv->pd = ib_alloc_pd(device, 0); + if (IS_ERR(port_priv->pd)) { + dev_err(&device->dev, "Couldn't create ib_mad PD\n"); + ret = PTR_ERR(port_priv->pd); + goto error3; + } + port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, IB_POLL_UNBOUND_WORKQUEUE); if (IS_ERR(port_priv->cq)) { dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); - goto error3; - } - - port_priv->pd = ib_alloc_pd(device, 0); - if (IS_ERR(port_priv->pd)) { - dev_err(&device->dev, "Couldn't create ib_mad PD\n"); - ret = PTR_ERR(port_priv->pd); goto error4; } @@ -3278,11 +3278,11 @@ error8: error7: destroy_mad_qp(&port_priv->qp_info[0]); error6: - ib_dealloc_pd(port_priv->pd); -error4: ib_free_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); +error4: + ib_dealloc_pd(port_priv->pd); error3: kfree(port_priv); @@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) destroy_workqueue(port_priv->wq); destroy_mad_qp(&port_priv->qp_info[1]); destroy_mad_qp(&port_priv->qp_info[0]); - ib_dealloc_pd(port_priv->pd); ib_free_cq(port_priv->cq); + ib_dealloc_pd(port_priv->pd); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); /* XXX: Handle deallocation of MAD registration tables */ diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 9f8a48016b41..ffdeaf6e0b68 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -49,6 +49,7 @@ #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/slab.h> +#include <linux/nospec.h> #include <linux/uaccess.h> @@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) if (get_user(id, arg)) return -EFAULT; + if (id >= IB_UMAD_MAX_AGENTS) + return -EINVAL; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); - if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { + id = array_index_nospec(id, IB_UMAD_MAX_AGENTS); + if (!__get_agent(file, id)) { ret = -EINVAL; goto out; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index a91653aabf38..098ab883733e 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; struct bnxt_qplib_gid *gid_to_del; + u16 vlan_id = 0xFFFF; /* Delete the entry from the hardware */ ctx = *context; @@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) if (sgid_tbl && sgid_tbl->active) { if (ctx->idx >= sgid_tbl->max) return -EINVAL; - gid_to_del = &sgid_tbl->tbl[ctx->idx]; + gid_to_del = &sgid_tbl->tbl[ctx->idx].gid; + vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id; /* DEL_GID is called in WQ context(netdevice_event_work_handler) * or via the ib_unregister_device path. 
In the former case QP1 * may not be destroyed yet, in which case just return as FW @@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) } ctx->refcnt--; if (!ctx->refcnt) { - rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); + rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, + vlan_id, true); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 37928b1111df..bdbde8e22420 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl, u16 max) { - sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL); + sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL); if (!sgid_tbl->tbl) return -ENOMEM; @@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, for (i = 0; i < sgid_tbl->max; i++) { if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero))) - bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true); + bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid, + sgid_tbl->tbl[i].vlan_id, true); } - memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); + memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max); memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); sgid_tbl->active = 0; @@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct net_device *netdev) { - memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); + u32 i; + + for (i = 0; i < sgid_tbl->max; i++) + sgid_tbl->tbl[i].vlan_id = 0xffff; + memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 30c42c92fac7..fbda11a7ab1a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl { }; struct bnxt_qplib_sgid_tbl { - struct bnxt_qplib_gid *tbl; + struct bnxt_qplib_gid_info *tbl; u16 *hw_id; u16 max; u16 active; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 48793d3512ac..40296b97d21e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, index, sgid_tbl->max); return -EINVAL; } - memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid)); + memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid)); return 0; } int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, - struct bnxt_qplib_gid *gid, bool update) + struct bnxt_qplib_gid *gid, u16 vlan_id, bool update) { struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, struct bnxt_qplib_res, @@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return -ENOMEM; } for (index = 0; index < sgid_tbl->max; index++) { - if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid))) + if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) && + vlan_id == sgid_tbl->tbl[index].vlan_id) break; } if (index == sgid_tbl->max) { @@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, 
if (rc) return rc; } - memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, + memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero)); + sgid_tbl->tbl[index].vlan_id = 0xFFFF; sgid_tbl->vlan[index] = 0; sgid_tbl->active--; dev_dbg(&res->pdev->dev, @@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } free_idx = sgid_tbl->max; for (i = 0; i < sgid_tbl->max; i++) { - if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) { + if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) && + sgid_tbl->tbl[i].vlan_id == vlan_id) { dev_dbg(&res->pdev->dev, "SGID entry already exist in entry %d!\n", i); *index = i; @@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } /* Add GID to the sgid_tbl */ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); + sgid_tbl->tbl[free_idx].vlan_id = vlan_id; sgid_tbl->active++; if (vlan_id != 0xFFFF) sgid_tbl->vlan[free_idx] = 1; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 0ec3b12b0bcd..13d9432d5ce2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -84,6 +84,11 @@ struct bnxt_qplib_gid { u8 data[16]; }; +struct bnxt_qplib_gid_info { + struct bnxt_qplib_gid gid; + u16 vlan_id; +}; + struct bnxt_qplib_ah { struct bnxt_qplib_gid dgid; struct bnxt_qplib_pd *pd; @@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, struct bnxt_qplib_gid *gid); int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, - struct bnxt_qplib_gid *gid, bool update); + struct bnxt_qplib_gid *gid, u16 vlan_id, bool update); int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id, bool update, u32 *index); diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index d5b643a1d9fd..67052dc3100c 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); } -static void init_rxe(struct hfi1_devdata *dd) +static int init_rxe(struct hfi1_devdata *dd) { struct rsm_map_table *rmt; u64 val; @@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd) write_csr(dd, RCV_ERR_MASK, ~0ull); rmt = alloc_rsm_map_table(dd); + if (!rmt) + return -ENOMEM; + /* set up QOS, including the QPN map table */ init_qos(dd, rmt); init_fecn_handling(dd, rmt); @@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd) val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << RCV_BYPASS_HDR_SIZE_SHIFT); write_csr(dd, RCV_BYPASS, val); + return 0; } static void init_other(struct hfi1_devdata *dd) @@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd) goto bail_cleanup; /* set initial RXE CSRs */ - init_rxe(dd); + ret = init_rxe(dd); + if (ret) + goto bail_cleanup; + /* set initial TXE CSRs */ init_txe(dd); /* set initial non-RXE, non-TXE CSRs */ diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 0477c14633ab..024a7c2b6124 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; trdma_clean_swqe(qp, wqe); - rvt_qp_wqe_unreserve(qp, wqe); trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); 
rvt_qp_complete_swqe(qp, wqe, @@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { trdma_clean_swqe(qp, wqe); - rvt_qp_wqe_unreserve(qp, wqe); trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); rvt_qp_complete_swqe(qp, wqe, diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 92acccaaaa86..996fc298207e 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, flows[i].req = req; flows[i].npagesets = 0; flows[i].pagesets[0].mapped = 0; + flows[i].resync_npkts = 0; } req->flows = flows; return 0; @@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req, return NULL; } -static struct tid_rdma_flow * -__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail, - u32 psn, u16 *fidx) -{ - for ( ; CIRC_CNT(head, tail, MAX_FLOWS); - tail = CIRC_NEXT(tail, MAX_FLOWS)) { - struct tid_rdma_flow *flow = &req->flows[tail]; - u32 spsn, lpsn; - - spsn = full_flow_psn(flow, flow->flow_state.spsn); - lpsn = full_flow_psn(flow, flow->flow_state.lpsn); - - if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) { - if (fidx) - *fidx = tail; - return flow; - } - } - return NULL; -} - -static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req, - u32 psn, u16 *fidx) -{ - return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn, - fidx); -} - /* TID RDMA READ functions */ u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, @@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, * to prevent continuous Flow Sequence errors for any * packets that could be still in the fabric. */ - flow = find_flow(req, psn, NULL); - if (!flow) { - /* - * We can't find the IB PSN matching the - * received KDETH PSN. The only thing we can - * do at this point is report the error to - * the QP. 
- */ - hfi1_kern_read_tid_flow_free(qp); - spin_unlock(&qp->s_lock); - rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - return ret; - } + flow = &req->flows[req->clear_tail]; if (priv->s_flags & HFI1_R_TID_SW_PSN) { diff = cmp_psn(psn, flow->flow_state.r_next_psn); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index c4b243f50c76..646f61545ed6 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -54,6 +54,7 @@ #include <linux/mm.h> #include <linux/vmalloc.h> #include <rdma/opa_addr.h> +#include <linux/nospec.h> #include "hfi.h" #include "common.h" @@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) sl = rdma_ah_get_sl(ah_attr); if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) return -EINVAL; + sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc)); sc5 = ibp->sl_to_sc[sl]; if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 8bf847bcd8d3..54782197c717 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config INFINIBAND_HNS - tristate "HNS RoCE Driver" + bool "HNS RoCE Driver" depends on NET_VENDOR_HISILICON depends on ARM64 || (COMPILE_TEST && 64BIT) ---help--- @@ -11,7 +11,7 @@ config INFINIBAND_HNS To compile HIP06 or HIP08 driver as module, choose M here. config INFINIBAND_HNS_HIP06 - bool "Hisilicon Hip06 Family RoCE support" + tristate "Hisilicon Hip06 Family RoCE support" depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET ---help--- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and @@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06 module will be called hns-roce-hw-v1 config INFINIBAND_HNS_HIP08 - bool "Hisilicon Hip08 Family RoCE support" + tristate "Hisilicon Hip08 Family RoCE support" depends on INFINIBAND_HNS && PCI && HNS3 ---help--- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. 
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index e105945b94a1..449a2d81319d 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o -ifdef CONFIG_INFINIBAND_HNS_HIP06 hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o -endif +obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o -ifdef CONFIG_INFINIBAND_HNS_HIP08 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o -endif +obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 627aa46ef683..c00714c2f16a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, struct ib_udata *udata, unsigned long virt, struct hns_roce_db *db) { + unsigned long page_addr = virt & PAGE_MASK; struct hns_roce_user_db_page *page; + unsigned int offset; int ret = 0; mutex_lock(&context->page_mutex); list_for_each_entry(page, &context->page_list, list) - if (page->user_virt == (virt & PAGE_MASK)) + if (page->user_virt == page_addr) goto found; page = kmalloc(sizeof(*page), GFP_KERNEL); @@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, } refcount_set(&page->refcount, 1); - page->user_virt = (virt & PAGE_MASK); - page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0); + page->user_virt = page_addr; + page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0); if (IS_ERR(page->umem)) { ret = PTR_ERR(page->umem); kfree(page); @@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, list_add(&page->list, &context->page_list); found: - db->dma = sg_dma_address(page->umem->sg_head.sgl) + - (virt & ~PAGE_MASK); - page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; - db->virt_addr = sg_virt(page->umem->sg_head.sgl); + offset = virt - page_addr; + db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset; + db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset; db->u.user_page = page; refcount_inc(&page->refcount); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 81e6dedb1e02..c07e387a07a3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); pd = rdma_zalloc_drv_obj(ibdev, ib_pd); - if (!pd) + if (!pd) { + ret = -ENOMEM; goto alloc_mem_failed; + } pd->device = ibdev; ret = hns_roce_alloc_pd(pd, NULL); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c2a5780cb394..e12a4404096b 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, return; } - if (mpi->mdev_events.notifier_call) - mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); - mpi->mdev_events.notifier_call = NULL; - mpi->ibdev = NULL; spin_unlock(&port->mp.mpi_lock); + if (mpi->mdev_events.notifier_call) + 
mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); + mpi->mdev_events.notifier_call = NULL; mlx5_remove_netdev_notifier(ibdev, port_num); spin_lock(&port->mp.mpi_lock); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index c482f19958b3..f6a53455bf8b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -481,6 +481,7 @@ struct mlx5_umr_wr { u64 length; int access_flags; u32 mkey; + u8 ignore_free_state:1; }; static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 20ece6e0b2fc..b74fad08412f 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev) -{ - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled); -} static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) { return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); } -static bool use_umr(struct mlx5_ib_dev *dev, int order) -{ - return order <= mr_cache_max_order(dev) && - umr_can_modify_entity_size(dev); -} - static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); @@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return; c = order2idx(dev, mr->order); - if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { - mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); - return; - } + WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES); - if (unreg_umr(dev, mr)) + if (unreg_umr(dev, mr)) { + mr->allocated_from_cache = false; + destroy_mkey(dev, mr); + ent = &cache->ent[c]; + if (ent->cur < ent->limit) + queue_work(cache->wq, &ent->work); return; + } ent = &cache->ent[c]; spin_lock_irq(&ent->lock); @@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr = NULL; - bool populate_mtts = false; + bool use_umr; struct ib_umem *umem; int page_shift; int npages; @@ -1300,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - if (use_umr(dev, order)) { + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && + (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) || + !MLX5_CAP_GEN(dev->mdev, atomic)); + + if (order <= mr_cache_max_order(dev) && use_umr) { mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { mlx5_ib_dbg(dev, "cache empty for order %d\n", order); mr = NULL; } - populate_mtts = false; } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { if (access_flags & IB_ACCESS_ON_DEMAND) { err = -EINVAL; pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); goto error; } - populate_mtts = true; + use_umr = false; } if (!mr) { - if (!umr_can_modify_entity_size(dev)) - populate_mtts = true; mutex_lock(&dev->slow_path_mutex); mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, - page_shift, access_flags, populate_mtts); + page_shift, access_flags, 
!use_umr); mutex_unlock(&dev->slow_path_mutex); } @@ -1338,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, update_odp_mr(mr); - if (!populate_mtts) { + if (use_umr) { int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; if (access_flags & IB_ACCESS_ON_DEMAND) @@ -1373,9 +1367,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return 0; umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | - MLX5_IB_SEND_UMR_FAIL_IF_FREE; + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; umrwr.wr.opcode = MLX5_IB_WR_UMR; + umrwr.pd = dev->umrc.pd; umrwr.mkey = mr->mmkey.key; + umrwr.ignore_free_state = 1; return mlx5_ib_post_send_wait(dev, &umrwr); } @@ -1577,10 +1573,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) mr->sig = NULL; } - mlx5_free_priv_descs(mr); - - if (!allocated_from_cache) + if (!allocated_from_cache) { destroy_mkey(dev, mr); + mlx5_free_priv_descs(mr); + } } static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 5b642d81e617..81da82050d05 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, * overwrite the same MTTs. Concurent invalidations might race us, * but they will write 0s as well, so no difference in the end result. */ - + mutex_lock(&umem_odp->umem_mutex); for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; /* @@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); + mutex_unlock(&umem_odp->umem_mutex); /* * We are now sure that the device will not access the * memory. 
We can safely unmap it, and mark it as dirty if @@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, w->num_sge, 0); - kfree(w); + kvfree(w); } int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, @@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, if (valid_req) queue_work(system_unbound_wq, &work->work); else - kfree(work); + kvfree(work); srcu_read_unlock(&dev->mr_srcu, srcu_key); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 2a97619ed603..379328b2598f 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, } MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); memcpy(rss_key, ucmd.rx_hash_key, len); break; } @@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev, memset(umr, 0, sizeof(*umr)); - if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) - umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ - else - umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ + if (!umrwr->ignore_free_state) { + if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) + /* fail if free */ + umr->flags = MLX5_UMR_CHECK_FREE; + else + /* fail if not free */ + umr->flags = MLX5_UMR_CHECK_NOT_FREE; + } umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 533157a2a3be..f97b3d65b30c 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, struct qedr_dev *dev = rdma_device_to_drv_device(device, struct qedr_dev, ibdev); - return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor); + return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver); } static DEVICE_ATTR_RO(hw_rev); static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET"); + struct qedr_dev *dev = + rdma_device_to_drv_device(device, struct qedr_dev, ibdev); + + return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n", + dev->pdev->device, + rdma_protocol_iwarp(&dev->ibdev, 1) ? 
+ "iWARP" : "RoCE"); } static DEVICE_ATTR_RO(hca_type); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index a7cde98e73e8..9ce8a1b925d2 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work) static void siw_cep_set_inuse(struct siw_cep *cep) { unsigned long flags; - int rv; retry: spin_lock_irqsave(&cep->lock, flags); if (cep->in_use) { spin_unlock_irqrestore(&cep->lock, flags); - rv = wait_event_interruptible(cep->waitq, !cep->in_use); + wait_event_interruptible(cep->waitq, !cep->in_use); if (signal_pending(current)) flush_signals(current); goto retry; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index f55c4e80aea4..d0f140daf659 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -612,6 +612,7 @@ static __init int siw_init_module(void) if (!siw_create_tx_threads()) { pr_info("siw: Could not start any TX thread\n"); + rv = -ENOMEM; goto out_error; } /* diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index 11383d9f95ef..e27bd5b35b96 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp) { struct siw_rx_stream *c_rx = &qp->rx_stream; struct siw_iwarp_tx *c_tx = &qp->tx_ctx; - int size = crypto_shash_descsize(siw_crypto_shash) + - sizeof(struct shash_desc); + int size; if (siw_crypto_shash == NULL) return -ENOENT; + size = crypto_shash_descsize(siw_crypto_shash) + + sizeof(struct shash_desc); + c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) { diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index eb104c719629..4413aa67000e 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -23,6 +23,8 @@ #include <linux/mem_encrypt.h> #include <asm/pci-direct.h> #include <asm/iommu.h> +#include <asm/apic.h> +#include <asm/msidef.h> #include <asm/gart.h> #include <asm/x86_init.h> #include <asm/iommu_table.h> @@ -1920,6 +1922,90 @@ static int iommu_setup_msi(struct amd_iommu *iommu) return 0; } +#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2) +#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8) +#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32) +#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56) + +/** + * Setup the IntCapXT registers with interrupt routing information + * based on the PCI MSI capability block registers, accessed via + * MMIO MSI address low/hi and MSI data registers. + */ +static void iommu_update_intcapxt(struct amd_iommu *iommu) +{ + u64 val; + u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET); + u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET); + u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET); + bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; + u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF); + + if (x2apic_enabled()) + dest |= MSI_ADDR_EXT_DEST_ID(addr_hi); + + val = XT_INT_VEC(data & 0xFF) | + XT_INT_DEST_MODE(dm) | + XT_INT_DEST_LO(dest) | + XT_INT_DEST_HI(dest); + + /** + * Current IOMMU implemtation uses the same IRQ for all + * 3 IOMMU interrupts. 
+ */ + writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); + writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); + writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); +} + +static void _irq_notifier_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct amd_iommu *iommu; + + for_each_iommu(iommu) { + if (iommu->dev->irq == notify->irq) { + iommu_update_intcapxt(iommu); + break; + } + } +} + +static void _irq_notifier_release(struct kref *ref) +{ +} + +static int iommu_init_intcapxt(struct amd_iommu *iommu) +{ + int ret; + struct irq_affinity_notify *notify = &iommu->intcapxt_notify; + + /** + * IntCapXT requires XTSup=1, which can be inferred from + * amd_iommu_xt_mode. + */ + if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE) + return 0; + + /** + * Also, we need to setup notifier to update the IntCapXT registers + * whenever the irq affinity is changed from user-space. + */ + notify->irq = iommu->dev->irq; + notify->notify = _irq_notifier_notify, + notify->release = _irq_notifier_release, + ret = irq_set_affinity_notifier(iommu->dev->irq, notify); + if (ret) { + pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n", + iommu->devid, iommu->dev->irq); + return ret; + } + + iommu_update_intcapxt(iommu); + iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); + return ret; +} + static int iommu_init_msi(struct amd_iommu *iommu) { int ret; @@ -1936,6 +2022,10 @@ static int iommu_init_msi(struct amd_iommu *iommu) return ret; enable_faults: + ret = iommu_init_intcapxt(iommu); + if (ret) + return ret; + iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); if (iommu->ppr_log != NULL) diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 52c35d557fad..64edd5a9694c 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -60,6 +60,12 @@ #define MMIO_PPR_LOG_OFFSET 0x0038 #define MMIO_GA_LOG_BASE_OFFSET 0x00e0 #define MMIO_GA_LOG_TAIL_OFFSET 0x00e8 +#define MMIO_MSI_ADDR_LO_OFFSET 0x015C +#define MMIO_MSI_ADDR_HI_OFFSET 0x0160 +#define MMIO_MSI_DATA_OFFSET 0x0164 +#define MMIO_INTCAPXT_EVT_OFFSET 0x0170 +#define MMIO_INTCAPXT_PPR_OFFSET 0x0178 +#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180 #define MMIO_CMD_HEAD_OFFSET 0x2000 #define MMIO_CMD_TAIL_OFFSET 0x2008 #define MMIO_EVT_HEAD_OFFSET 0x2010 @@ -150,6 +156,7 @@ #define CONTROL_GALOG_EN 0x1CULL #define CONTROL_GAINT_EN 0x1DULL #define CONTROL_XT_EN 0x32ULL +#define CONTROL_INTCAPXT_EN 0x33ULL #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) #define CTRL_INV_TO_NONE 0 @@ -592,6 +599,8 @@ struct amd_iommu { /* DebugFS Info */ struct dentry *debugfs; #endif + /* IRQ notifier for IntCapXT interrupt */ + struct irq_affinity_notify intcapxt_notify; }; static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c index 73a552914455..2b25d9c59336 100644 --- a/drivers/iommu/intel-iommu-debugfs.c +++ b/drivers/iommu/intel-iommu-debugfs.c @@ -162,9 +162,9 @@ static inline void print_tbl_walk(struct seq_file *m) (u64)0, (u64)0, (u64)0); else seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", - tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0], + tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2], tbl_wlk->pasid_tbl_entry->val[1], - tbl_wlk->pasid_tbl_entry->val[2]); + tbl_wlk->pasid_tbl_entry->val[0]); } static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index
ac4172c02244..bdaed2da8a55 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -339,8 +339,6 @@ static void domain_exit(struct dmar_domain *domain); static void domain_remove_dev_info(struct dmar_domain *domain); static void dmar_remove_one_dev_info(struct device *dev); static void __dmar_remove_one_dev_info(struct device_domain_info *info); -static void domain_context_clear(struct intel_iommu *iommu, - struct device *dev); static int domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); static bool device_is_rmrr_locked(struct device *dev); @@ -1833,9 +1831,65 @@ static inline int guestwidth_to_adjustwidth(int gaw) return agaw; } +static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, + int guest_width) +{ + int adjust_width, agaw; + unsigned long sagaw; + int err; + + init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); + + err = init_iova_flush_queue(&domain->iovad, + iommu_flush_iova, iova_entry_free); + if (err) + return err; + + domain_reserve_special_ranges(domain); + + /* calculate AGAW */ + if (guest_width > cap_mgaw(iommu->cap)) + guest_width = cap_mgaw(iommu->cap); + domain->gaw = guest_width; + adjust_width = guestwidth_to_adjustwidth(guest_width); + agaw = width_to_agaw(adjust_width); + sagaw = cap_sagaw(iommu->cap); + if (!test_bit(agaw, &sagaw)) { + /* hardware doesn't support it, choose a bigger one */ + pr_debug("Hardware doesn't support agaw %d\n", agaw); + agaw = find_next_bit(&sagaw, 5, agaw); + if (agaw >= 5) + return -ENODEV; + } + domain->agaw = agaw; + + if (ecap_coherent(iommu->ecap)) + domain->iommu_coherency = 1; + else + domain->iommu_coherency = 0; + + if (ecap_sc_support(iommu->ecap)) + domain->iommu_snooping = 1; + else + domain->iommu_snooping = 0; + + if (intel_iommu_superpage) + domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); + else + domain->iommu_superpage = 0; + + domain->nid = iommu->node; + + /* always allocate the top pgd */ + domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); + if (!domain->pgd) + return -ENOMEM; + __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); + return 0; +} + static void domain_exit(struct dmar_domain *domain) { - struct page *freelist; /* Remove associated devices and clear attached or cached domains */ domain_remove_dev_info(domain); @@ -1843,9 +1897,12 @@ static void domain_exit(struct dmar_domain *domain) /* destroy iovas */ put_iova_domain(&domain->iovad); - freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + if (domain->pgd) { + struct page *freelist; - dma_free_pagelist(freelist); + freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + dma_free_pagelist(freelist); + } free_domain_mem(domain); } @@ -2048,26 +2105,9 @@ out_unlock: return ret; } -struct domain_context_mapping_data { - struct dmar_domain *domain; - struct intel_iommu *iommu; - struct pasid_table *table; -}; - -static int domain_context_mapping_cb(struct pci_dev *pdev, - u16 alias, void *opaque) -{ - struct domain_context_mapping_data *data = opaque; - - return domain_context_mapping_one(data->domain, data->iommu, - data->table, PCI_BUS_NUM(alias), - alias & 0xff); -} - static int domain_context_mapping(struct dmar_domain *domain, struct device *dev) { - struct domain_context_mapping_data data; struct pasid_table *table; struct intel_iommu *iommu; u8 bus, devfn; @@ -2077,17 +2117,7 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev) return -ENODEV; table = intel_pasid_get_table(dev); - - if 
(!dev_is_pci(dev)) - return domain_context_mapping_one(domain, iommu, table, - bus, devfn); - - data.domain = domain; - data.iommu = iommu; - data.table = table; - - return pci_for_each_dma_alias(to_pci_dev(dev), - &domain_context_mapping_cb, &data); + return domain_context_mapping_one(domain, iommu, table, bus, devfn); } static int domain_context_mapped_cb(struct pci_dev *pdev, @@ -2513,31 +2543,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque) return 0; } -static int domain_init(struct dmar_domain *domain, int guest_width) -{ - int adjust_width; - - init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); - domain_reserve_special_ranges(domain); - - /* calculate AGAW */ - domain->gaw = guest_width; - adjust_width = guestwidth_to_adjustwidth(guest_width); - domain->agaw = width_to_agaw(adjust_width); - - domain->iommu_coherency = 0; - domain->iommu_snooping = 0; - domain->iommu_superpage = 0; - domain->max_addr = 0; - - /* always allocate the top pgd */ - domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); - if (!domain->pgd) - return -ENOMEM; - domain_flush_cache(domain, domain->pgd, PAGE_SIZE); - return 0; -} - static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) { struct device_domain_info *info; @@ -2575,19 +2580,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) domain = alloc_domain(0); if (!domain) return NULL; - - if (domain_init(domain, gaw)) { + if (domain_init(domain, iommu, gaw)) { domain_exit(domain); return NULL; } - if (init_iova_flush_queue(&domain->iovad, - iommu_flush_iova, - iova_entry_free)) { - pr_warn("iova flush queue initialization failed\n"); - intel_iommu_strict = 1; - } - out: return domain; } @@ -2692,6 +2689,8 @@ static int domain_prepare_identity_map(struct device *dev, return iommu_domain_identity_map(domain, start, end); } +static int md_domain_init(struct dmar_domain *domain, int guest_width); + static int __init si_domain_init(int hw) { struct dmar_rmrr_unit *rmrr; @@ -2702,7 +2701,7 @@ static int __init si_domain_init(int hw) if (!si_domain) return -EFAULT; - if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); return -EFAULT; } @@ -3564,7 +3563,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) freelist = domain_unmap(domain, start_pfn, last_pfn); - if (intel_iommu_strict || (pdev && pdev->untrusted)) { + if (intel_iommu_strict || (pdev && pdev->untrusted) || + !has_iova_flush_queue(&domain->iovad)) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, nrpages, !freelist, 0); /* free iova */ @@ -4758,28 +4758,6 @@ out_free_dmar: return ret; } -static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) -{ - struct intel_iommu *iommu = opaque; - - domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); - return 0; -} - -/* - * NB - intel-iommu lacks any sort of reference counting for the users of - * dependent devices. If multiple endpoints have intersecting dependent - * devices, unbinding the driver from any one of them will possibly leave - * the others unable to operate. 
- */ -static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) -{ - if (!iommu || !dev || !dev_is_pci(dev)) - return; - - pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); -} - static void __dmar_remove_one_dev_info(struct device_domain_info *info) { struct dmar_domain *domain; @@ -4800,7 +4778,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) PASID_RID2PASID); iommu_disable_dev_iotlb(info); - domain_context_clear(iommu, info->dev); + domain_context_clear_one(iommu, info->bus, info->devfn); intel_pasid_free_table(info->dev); } @@ -4829,6 +4807,31 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_unlock_irqrestore(&device_domain_lock, flags); } +static int md_domain_init(struct dmar_domain *domain, int guest_width) +{ + int adjust_width; + + init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); + domain_reserve_special_ranges(domain); + + /* calculate AGAW */ + domain->gaw = guest_width; + adjust_width = guestwidth_to_adjustwidth(guest_width); + domain->agaw = width_to_agaw(adjust_width); + + domain->iommu_coherency = 0; + domain->iommu_snooping = 0; + domain->iommu_superpage = 0; + domain->max_addr = 0; + + /* always allocate the top pgd */ + domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); + if (!domain->pgd) + return -ENOMEM; + domain_flush_cache(domain, domain->pgd, PAGE_SIZE); + return 0; +} + static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) { struct dmar_domain *dmar_domain; @@ -4843,7 +4846,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) pr_err("Can't allocate dmar_domain\n"); return NULL; } - if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { pr_err("Domain initialization failed\n"); domain_exit(dmar_domain); return NULL; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index d499b2621239..3e1a8a675572 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, } EXPORT_SYMBOL_GPL(init_iova_domain); +bool has_iova_flush_queue(struct iova_domain *iovad) +{ + return !!iovad->fq; +} + static void free_iova_flush_queue(struct iova_domain *iovad) { - if (!iovad->fq) + if (!has_iova_flush_queue(iovad)) return; if (timer_pending(&iovad->fq_timer)) @@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad) int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) { + struct iova_fq __percpu *queue; int cpu; atomic64_set(&iovad->fq_flush_start_cnt, 0); atomic64_set(&iovad->fq_flush_finish_cnt, 0); - iovad->fq = alloc_percpu(struct iova_fq); - if (!iovad->fq) + queue = alloc_percpu(struct iova_fq); + if (!queue) return -ENOMEM; iovad->flush_cb = flush_cb; @@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad, for_each_possible_cpu(cpu) { struct iova_fq *fq; - fq = per_cpu_ptr(iovad->fq, cpu); + fq = per_cpu_ptr(queue, cpu); fq->head = 0; fq->tail = 0; spin_lock_init(&fq->lock); } + smp_wmb(); + + iovad->fq = queue; + timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); atomic_set(&iovad->fq_timer_on, 0); @@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) struct iova *cached_iova; cached_iova = rb_entry(iovad->cached32_node, struct iova, node); - if (free->pfn_hi < iovad->dma_32bit_pfn && - free->pfn_lo >= cached_iova->pfn_lo) { + if 
(free == cached_iova || + (free->pfn_hi < iovad->dma_32bit_pfn && + free->pfn_lo >= cached_iova->pfn_lo)) { iovad->cached32_node = rb_next(&free->node); iovad->max32_alloc_size = iovad->dma_32bit_pfn; } diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 433f4d2ee956..80a740df0737 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -2,7 +2,7 @@ /* * Virtio driver for the paravirtualized IOMMU * - * Copyright (C) 2018 Arm Limited + * Copyright (C) 2019 Arm Limited */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -47,7 +47,10 @@ struct viommu_dev { /* Device configuration */ struct iommu_domain_geometry geometry; u64 pgsize_bitmap; - u8 domain_bits; + u32 first_domain; + u32 last_domain; + /* Supported MAP flags */ + u32 map_flags; u32 probe_size; }; @@ -62,6 +65,7 @@ struct viommu_domain { struct viommu_dev *viommu; struct mutex mutex; /* protects viommu pointer */ unsigned int id; + u32 map_flags; spinlock_t mappings_lock; struct rb_root_cached mappings; @@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len) return -ENOENT; case VIRTIO_IOMMU_S_FAULT: return -EFAULT; + case VIRTIO_IOMMU_S_NOMEM: + return -ENOMEM; case VIRTIO_IOMMU_S_IOERR: case VIRTIO_IOMMU_S_DEVERR: default: @@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu, { int ret; struct viommu_domain *vdomain = to_viommu_domain(domain); - unsigned int max_domain = viommu->domain_bits > 31 ? ~0 : - (1U << viommu->domain_bits) - 1; vdomain->viommu = viommu; + vdomain->map_flags = viommu->map_flags; domain->pgsize_bitmap = viommu->pgsize_bitmap; domain->geometry = viommu->geometry; - ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL); + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, + viommu->last_domain, GFP_KERNEL); if (ret >= 0) vdomain->id = (unsigned int)ret; @@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { int ret; - int flags; + u32 flags; struct virtio_iommu_req_map map; struct viommu_domain *vdomain = to_viommu_domain(domain); @@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | (prot & IOMMU_MMIO ? 
VIRTIO_IOMMU_MAP_F_MMIO : 0); + if (flags & ~vdomain->map_flags) + return -EINVAL; + ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); if (ret) return ret; @@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev) goto err_free_vqs; } - viommu->domain_bits = 32; + viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; + viommu->last_domain = ~0U; /* Optional features */ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, @@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev) struct virtio_iommu_config, input_range.end, &input_end); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS, - struct virtio_iommu_config, domain_bits, - &viommu->domain_bits); + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.start, + &viommu->first_domain); + + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.end, + &viommu->last_domain); virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, struct virtio_iommu_config, probe_size, @@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev) .force_aperture = true, }; + if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO)) + viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; + viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; virtio_device_ready(vdev); @@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev) static unsigned int features[] = { VIRTIO_IOMMU_F_MAP_UNMAP, - VIRTIO_IOMMU_F_DOMAIN_BITS, VIRTIO_IOMMU_F_INPUT_RANGE, + VIRTIO_IOMMU_F_DOMAIN_RANGE, VIRTIO_IOMMU_F_PROBE, + VIRTIO_IOMMU_F_MMIO, }; static struct virtio_device_id id_table[] = { diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 730fbe0e2a9d..1b5c3672aea2 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe) if (!its_alloc_vpe_table(vpe_id)) { its_vpe_id_free(vpe_id); - its_free_pending_table(vpe->vpt_page); + its_free_pending_table(vpt_page); return -ENOMEM; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 9bca4896fa6f..96d927f0f91a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -771,8 +771,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP0R3_EL1); write_gicreg(0, ICC_AP0R2_EL1); + /* Fall through */ case 6: write_gicreg(0, ICC_AP0R1_EL1); + /* Fall through */ case 5: case 4: write_gicreg(0, ICC_AP0R0_EL1); @@ -786,8 +788,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP1R3_EL1); write_gicreg(0, ICC_AP1R2_EL1); + /* Fall through */ case 6: write_gicreg(0, ICC_AP1R1_EL1); + /* Fall through */ case 5: case 4: write_gicreg(0, ICC_AP1R0_EL1); diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index bf2237ac5d09..4f74c15c4755 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = { .irq_unmask = imx_gpcv2_irq_unmask, .irq_set_wake = imx_gpcv2_irq_set_wake, .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 3dd28382d5f5..3f09f658e8e2 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -241,12 +241,15 @@ static int 
mbigen_of_create_domain(struct platform_device *pdev, parent = platform_bus_type.dev_root; child = of_platform_device_create(np, NULL, parent); - if (!child) + if (!child) { + of_node_put(np); return -ENOMEM; + } if (of_property_read_u32(child->dev.of_node, "num-pins", &num_pins) < 0) { dev_err(&pdev->dev, "No num-pins property\n"); + of_node_put(np); return -EINVAL; } @@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev, mbigen_write_msg, &mbigen_domain_ops, mgn_chip); - if (!domain) + if (!domain) { + of_node_put(np); return -ENOMEM; + } } return 0; diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 276065c888bc..23f1f41c8602 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -852,6 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd) break; case SMU_I2C_TRANSFER_COMBINED: cmd->info.devaddr &= 0xfe; + /* fall through */ case SMU_I2C_TRANSFER_STDSUB: if (cmd->info.sublen > 3) return -EINVAL; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 26e374fbf57c..20ed838e9413 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -931,6 +931,9 @@ int bch_cached_dev_run(struct cached_dev *dc) if (dc->io_disable) { pr_err("I/O disabled on cached dev %s", dc->backing_dev_name); + kfree(env[1]); + kfree(env[2]); + kfree(buf); return -EIO; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index caaee8032afe..7b6c3ee9e755 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); /* validate the dax capability of the target device span */ int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) + sector_t start, sector_t len, void *data) { int blocksize = *(int *) data; return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize, - start, len); + start, len); } /* Check devices support synchronous DAX */ -static int device_synchronous(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - return dax_synchronous(dev->dax_dev); + return dev->dax_dev && dax_synchronous(dev->dax_dev); } bool dm_table_supports_dax(struct dm_table *t, - iterate_devices_callout_fn iterate_fn, int *blocksize) + iterate_devices_callout_fn iterate_fn, int *blocksize) { struct dm_target *ti; unsigned i; @@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t, return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, iterate_fn, blocksize)) + !ti->type->iterate_devices(ti, iterate_fn, blocksize)) return false; } @@ -1921,7 +1921,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { blk_queue_flag_set(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax(t, device_synchronous, NULL)) + if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) set_dax_synchronous(t->md->dax_dev); } else diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 21fb90d66bfc..25c73c13cc7e 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -124,7 +124,7 @@ static inline int check_which(__u32 which) static inline int check_pad(struct v4l2_subdev *sd, __u32 pad) { #if defined(CONFIG_MEDIA_CONTROLLER) - if (sd->entity.graph_obj.mdev) { + if 
(sd->entity.num_pads) { if (pad >= sd->entity.num_pads) return -EINVAL; return 0; diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index f88094719552..f2abe27010ef 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -5,6 +5,7 @@ config EEPROM_AT24 tristate "I2C EEPROMs / RAMs / ROMs from most vendors" depends on I2C && SYSFS select NVMEM + select NVMEM_SYSFS select REGMAP_I2C help Enable this driver to get read/write support to most I2C EEPROMs @@ -34,6 +35,7 @@ config EEPROM_AT25 tristate "SPI EEPROMs from most vendors" depends on SPI && SYSFS select NVMEM + select NVMEM_SYSFS help Enable this driver to get read/write support to most SPI EEPROMs, after you configure the board init code to know about each eeprom @@ -80,6 +82,7 @@ config EEPROM_93XX46 depends on SPI && SYSFS select REGMAP select NVMEM + select NVMEM_SYSFS help Driver for the microwire EEPROM chipsets 93xx46x. The driver supports both read and write commands and also the command to diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 35bf2477693d..518945b2f737 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -685,7 +685,7 @@ static int at24_probe(struct i2c_client *client) nvmem_config.name = dev_name(dev); nvmem_config.dev = dev; nvmem_config.read_only = !writable; - nvmem_config.root_only = true; + nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO); nvmem_config.owner = THIS_MODULE; nvmem_config.compat = true; nvmem_config.base_dev = dev; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 75294ec65257..1a2c062a57d4 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -695,8 +695,8 @@ static int goya_sw_init(struct hl_device *hdev) goto free_dma_pool; } - dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n", - hdev->cpu_accessible_dma_address); + dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n", + &hdev->cpu_accessible_dma_address); hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); if (!hdev->cpu_accessible_dma_pool) { @@ -4449,7 +4449,6 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) case GOYA_ASYNC_EVENT_ID_AXI_ECC: case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: - case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: goya_print_irq_info(hdev, event_type, false); hl_device_reset(hdev, true, false); break; @@ -4485,6 +4484,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) goya_unmask_irq(hdev, event_type); break; + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index d74b182e19f3..6c0173772162 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -81,6 +81,9 @@ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ +#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ +#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 7a2b3545a7f9..57cb68f5cc64 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -98,6 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + 
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index e327f80ebe70..7102e2ebc614 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -10,6 +10,7 @@ #include <linux/kthread.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> +#include <linux/backing-dev.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> @@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) goto free_tag_set; } + if (mmc_host_is_spi(host) && host->use_spi_crc) + mq->queue->backing_dev_info->capabilities |= + BDI_CAP_STABLE_WRITES; + mq->queue->queuedata = mq; blk_queue_rq_timeout(mq->queue, 60 * HZ); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index faaaf52a46d2..eea52e2c5a0c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv) * delayed. Allowing the transfer to take place * avoids races and keeps things simple. */ - if ((err != -ETIMEDOUT) && - (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { + if (err != -ETIMEDOUT) { state = STATE_SENDING_DATA; continue; } diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 2d736e416775..ba9a63db73da 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -73,7 +73,7 @@ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) - #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10) #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 6ee340a3fb3a..603a5d9f045a 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -624,6 +624,7 @@ err_cleanup_host: sdhci_cleanup_host(host); pm_runtime_disable: + pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig index cff6bbd226f5..b4e3caf7d799 100644 --- a/drivers/mtd/hyperbus/Kconfig +++ b/drivers/mtd/hyperbus/Kconfig @@ -14,8 +14,9 @@ if MTD_HYPERBUS config HBMC_AM654 tristate "HyperBus controller driver for AM65x SoC" + depends on ARM64 || COMPILE_TEST select MULTIPLEXER - select MUX_MMIO + imply MUX_MMIO help This is the driver for HyperBus controller on TI's AM65x and other SoCs diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index a1f8fe1abb10..e082d632fb74 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -3259,6 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd) switch (density) { case ONENAND_DEVICE_DENSITY_8Gb: this->options |= ONENAND_HAS_NOP_1; + /* fall through */ case ONENAND_DEVICE_DENSITY_4Gb: if (ONENAND_IS_DDP(this)) this->options |= ONENAND_HAS_2PLANE; diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 1622d3145587..8ca9fad6e6ad 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) 
(chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2) return MICRON_ON_DIE_UNSUPPORTED; + /* + * It seems that there are devices which do not support ECC officially. + * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support + * enabling the ECC feature but don't reflect that in the READ_ID table. + * So we have to guarantee that we disable the ECC feature directly + * after we did the READ_ID table command. Later we can evaluate the + * ECC_ENABLE support. + */ ret = micron_nand_on_die_ecc_setup(chip, true); if (ret) return MICRON_ON_DIE_UNSUPPORTED; @@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) if (ret) return MICRON_ON_DIE_UNSUPPORTED; - if (!(id[4] & MICRON_ID_ECC_ENABLED)) - return MICRON_ON_DIE_UNSUPPORTED; - ret = micron_nand_on_die_ecc_setup(chip, false); if (ret) return MICRON_ON_DIE_UNSUPPORTED; + if (!(id[4] & MICRON_ID_ECC_ENABLED)) + return MICRON_ON_DIE_UNSUPPORTED; + ret = nand_readid_op(chip, 0, id, sizeof(id)); if (ret) return MICRON_ON_DIE_UNSUPPORTED; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 1d4075903971..c8e1a04ba384 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -898,7 +898,8 @@ static void at91_irq_err_state(struct net_device *dev, CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } - case CAN_STATE_ERROR_WARNING: /* fallthrough */ + /* fall through */ + case CAN_STATE_ERROR_WARNING: /* * from: ERROR_ACTIVE, ERROR_WARNING * to : ERROR_PASSIVE, BUS_OFF @@ -947,7 +948,8 @@ static void at91_irq_err_state(struct net_device *dev, netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; - case CAN_STATE_ERROR_WARNING: /* fallthrough */ + /* fall through */ + case CAN_STATE_ERROR_WARNING: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = AT91_IRQ_ERRP; break; diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 7f6a3b971da9..13b10cbf236a 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -660,7 +660,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, PCIEFD_REG_CAN_CLK_SEL); - /* fallthough */ + /* fall through */ case CANFD_CLK_SEL_80MHZ: priv->ucan.can.clock.freq = 80 * 1000 * 1000; break; diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 44e99e3d7134..234cf1042df6 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -860,7 +860,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_warning++; - case CAN_STATE_ERROR_WARNING: /* fallthrough */ + /* fall through */ + case CAN_STATE_ERROR_WARNING: if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_passive++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 15ce5ad1d632..617da295b6c1 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -415,7 +415,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, new_state = CAN_STATE_ERROR_WARNING; break; } - /* else: fall through */ + /* fall through */ case CAN_STATE_ERROR_WARNING: if (n & PCAN_USB_ERROR_BUS_HEAVY) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 656ed80647f0..e2be5a685130 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -285,6 +285,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) hw_cons = le16_to_cpu(*txdata->tx_cons_sb); sw_cons = txdata->tx_pkt_cons; + /* Ensure subsequent loads occur after hw_cons */ + smp_rmb(); + while (sw_cons != hw_cons) { u16 pkt_cons; diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c index 20c09cc4b323..60aa45b375b6 100644 --- a/drivers/net/ethernet/chelsio/cxgb/my3126.c +++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c @@ -94,7 +94,7 @@ static int my3126_interrupt_handler(struct cphy *cphy) return cphy_cause_link_change; } -static void my3216_poll(struct work_struct *work) +static void my3126_poll(struct work_struct *work) { struct cphy *cphy = container_of(work, struct cphy, phy_update.work); @@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(struct net_device *dev, return NULL; cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops); - INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); + INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll); cphy->bmsr = 0; return cphy; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 67202b6f352e..4311ad9c84b2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5561,7 +5561,6 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) char name[IFNAMSIZ]; u32 devcap2; u16 flags; - int pos; /* If we want to instantiate Virtual Functions, then our * parent bridge's PCI-E needs to support Alternative Routing @@ -5569,9 +5568,8 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) * and above. */ pbridge = pdev->bus->self; - pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP); - pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags); - pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2); + pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags); + pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2); if ((flags & PCI_EXP_FLAGS_VERS) < 2 || !(devcap2 & PCI_EXP_DEVCAP2_ARI)) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 312599c6b35a..e447976bdd3e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = { static struct ch_tc_flower_entry *allocate_flower_entry(void) { struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); - spin_lock_init(&new->lock); + if (new) + spin_lock_init(&new->lock); return new; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 9dd5ed9a2965..f7fc553356f2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7309,7 +7309,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, } else { unsigned int pack_align; unsigned int ingpad, ingpack; - unsigned int pcie_cap; /* T5 introduced the separation of the Free List Padding and * Packing Boundaries. Thus, we can select a smaller Padding @@ -7334,8 +7333,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, * multiple of the Maximum Payload Size. 
*/ pack_align = fl_align; - pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP); - if (pcie_cap) { + if (pci_is_pcie(adap->pdev)) { unsigned int mps, mps_log; u16 devctl; @@ -7343,9 +7341,8 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, * [bits 7:5] encodes sizes as powers of 2 starting at * 128 bytes. */ - pci_read_config_word(adap->pdev, - pcie_cap + PCI_EXP_DEVCTL, - &devctl); + pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, + &devctl); mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; mps = 1 << mps_log; if (mps > pack_align) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b7a246b33599..2edb86ec9fe9 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4698,8 +4698,13 @@ int be_update_queues(struct be_adapter *adapter) int status; if (netif_running(netdev)) { + /* be_tx_timeout() must not run concurrently with this + * function, synchronize with an already-running dev_watchdog + */ + netif_tx_lock_bh(netdev); /* device cannot transmit now, avoid dev_watchdog timeouts */ netif_carrier_off(netdev); + netif_tx_unlock_bh(netdev); be_close(netdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 8ad5292eebbe..75329ab775a6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -43,7 +43,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ - HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a38ac7cfe16b..690b9990215c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -304,7 +304,7 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, memcpy(&msg_data[6], &vlan_tag, sizeof(u16)); return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), - HLCGE_MBX_PUSH_VLAN_INFO, vfid); + HCLGE_MBX_PUSH_VLAN_INFO, vfid); } static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index f60b80bd605e..6a96987bd8f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -204,7 +204,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: - case HLCGE_MBX_PUSH_VLAN_INFO: + case HCLGE_MBX_PUSH_VLAN_INFO: /* set this mbx event as pending. This is required as we * might loose interrupt event when mbx task is busy * handling. 
This shall be cleared when mbx task just @@ -307,7 +307,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) hclgevf_reset_task_schedule(hdev); break; - case HLCGE_MBX_PUSH_VLAN_INFO: + case HCLGE_MBX_PUSH_VLAN_INFO: state = le16_to_cpu(msg_q[1]); vlan_info = &msg_q[1]; hclgevf_update_port_base_vlan_info(hdev, state, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 93f3b4e6185b..aa9323e55406 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3912,13 +3912,11 @@ void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) { struct igc_adapter *adapter = hw->back; - u16 cap_offset; - cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); - if (!cap_offset) + if (!pci_is_pcie(adapter->pdev)) return -IGC_ERR_CONFIG; - pci_read_config_word(adapter->pdev, cap_offset + reg, value); + pcie_capability_read_word(adapter->pdev, reg, value); return IGC_SUCCESS; } @@ -3926,13 +3924,11 @@ s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) { struct igc_adapter *adapter = hw->back; - u16 cap_offset; - cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); - if (!cap_offset) + if (!pci_is_pcie(adapter->pdev)) return -IGC_ERR_CONFIG; - pci_write_config_word(adapter->pdev, cap_offset + reg, *value); + pcie_capability_write_word(adapter->pdev, reg, *value); return IGC_SUCCESS; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7245d287633d..7f747cb1a4f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -735,8 +735,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, list_add(&indr_priv->list, &rpriv->uplink_priv.tc_indr_block_priv_list); - block_cb = flow_block_cb_alloc(f->net, - mlx5e_rep_indr_setup_block_cb, + block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb, indr_priv, indr_priv, mlx5e_rep_indr_tc_block_unbind); if (IS_ERR(block_cb)) { @@ -753,7 +752,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, if (!indr_priv) return -ENOENT; - block_cb = flow_block_cb_lookup(f, + block_cb = flow_block_cb_lookup(f->block, mlx5e_rep_indr_setup_block_cb, indr_priv); if (!block_cb) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4d34d42b3b0e..650638152bbc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1604,14 +1604,14 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, bool register_block = false; int err; - block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, + block_cb = flow_block_cb_lookup(f->block, + mlxsw_sp_setup_tc_block_cb_flower, mlxsw_sp); if (!block_cb) { acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); if (!acl_block) return -ENOMEM; - block_cb = flow_block_cb_alloc(f->net, - mlxsw_sp_setup_tc_block_cb_flower, + block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, mlxsw_sp, acl_block, mlxsw_sp_tc_block_flower_release); if (IS_ERR(block_cb)) { @@ -1657,7 +1657,8 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_cb *block_cb; int err; - block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, + 
block_cb = flow_block_cb_lookup(f->block, + mlxsw_sp_setup_tc_block_cb_flower, mlxsw_sp); if (!block_cb) return; @@ -1680,7 +1681,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_offload *f) { struct flow_block_cb *block_cb; - tc_setup_cb_t *cb; + flow_setup_cb_t *cb; bool ingress; int err; @@ -1702,7 +1703,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, &mlxsw_sp_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port, + block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, mlxsw_sp_port, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); @@ -1718,7 +1719,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, case FLOW_BLOCK_UNBIND: mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, f, ingress); - block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port); + block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); if (!block_cb) return -ENOENT; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 7aaddc09c185..59487d446a09 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -316,15 +316,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port, if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) return -EOPNOTSUPP; - block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, - port); + block_cb = flow_block_cb_lookup(f->block, + ocelot_setup_tc_block_cb_flower, port); if (!block_cb) { port_block = ocelot_port_block_create(port); if (!port_block) return -ENOMEM; - block_cb = flow_block_cb_alloc(f->net, - ocelot_setup_tc_block_cb_flower, + block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower, port, port_block, ocelot_tc_block_unbind); if (IS_ERR(block_cb)) { @@ -351,8 +350,8 @@ void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port, { struct flow_block_cb *block_cb; - block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, - port); + block_cb = flow_block_cb_lookup(f->block, + ocelot_setup_tc_block_cb_flower, port); if (!block_cb) return; diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c index 9e6464ffae5d..16a6db71ca5e 100644 --- a/drivers/net/ethernet/mscc/ocelot_tc.c +++ b/drivers/net/ethernet/mscc/ocelot_tc.c @@ -134,7 +134,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, struct flow_block_offload *f) { struct flow_block_cb *block_cb; - tc_setup_cb_t *cb; + flow_setup_cb_t *cb; int err; netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n", @@ -156,7 +156,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL); + block_cb = flow_block_cb_alloc(cb, port, port, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); @@ -169,7 +169,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, list_add_tail(&block_cb->driver_list, f->driver_block_list); return 0; case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f, cb, port); + block_cb = flow_block_cb_lookup(f->block, cb, port); if (!block_cb) return -ENOENT; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index faa8ba012a37..e209f150c5f2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c 
@@ -1318,8 +1318,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, &nfp_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, - nfp_flower_setup_tc_block_cb, + block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb, repr, repr, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); @@ -1328,7 +1327,8 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); return 0; case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb, + block_cb = flow_block_cb_lookup(f->block, + nfp_flower_setup_tc_block_cb, repr); if (!block_cb) return -ENOENT; @@ -1424,8 +1424,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, cb_priv->app = app; list_add(&cb_priv->list, &priv->indr_block_cb_priv); - block_cb = flow_block_cb_alloc(f->net, - nfp_flower_setup_indr_block_cb, + block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb, cb_priv, cb_priv, nfp_flower_setup_indr_tc_release); if (IS_ERR(block_cb)) { @@ -1442,7 +1441,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, if (!cb_priv) return -ENOENT; - block_cb = flow_block_cb_lookup(f, + block_cb = flow_block_cb_lookup(f->block, nfp_flower_setup_indr_block_cb, cb_priv); if (!block_cb) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index f900fde448db..17c64e43d6c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -530,9 +530,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); /* Check atomic operations support in PCI configuration space. 
*/ - pci_read_config_dword(cdev->pdev, - cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2, - &pci_status_control); + pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2, + &pci_status_control); if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0637c6752a78..6272115b2848 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -3251,9 +3251,9 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) ret = phy_read_paged(tp->phydev, 0x0a46, 0x13); if (ret & BIT(8)) - phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1)); + phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1)); else - phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0); + phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0); /* Enable PHY auto speed down */ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index afdcc5664ea6..3544e1991579 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -836,7 +836,6 @@ int netvsc_recv_callback(struct net_device *net, if (unlikely(!skb)) { ++net_device_ctx->eth_stats.rx_no_memory; - rcu_read_unlock(); return NVSP_STAT_FAIL; } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 2d816aadea79..e36c04c26866 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -517,7 +517,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value) static void sfp_hwmon_to_rx_power(long *value) { - *value = DIV_ROUND_CLOSEST(*value, 100); + *value = DIV_ROUND_CLOSEST(*value, 10); } static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset, diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 54edf8956a25..6e84328bdd40 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -165,23 +165,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk, static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) { - const struct ipv6hdr *iph = ipv6_hdr(skb); + const struct ipv6hdr *iph; struct net *net = dev_net(skb->dev); - struct flowi6 fl6 = { - /* needed to match OIF rule */ - .flowi6_oif = dev->ifindex, - .flowi6_iif = LOOPBACK_IFINDEX, - .daddr = iph->daddr, - .saddr = iph->saddr, - .flowlabel = ip6_flowinfo(iph), - .flowi6_mark = skb->mark, - .flowi6_proto = iph->nexthdr, - .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF, - }; + struct flowi6 fl6; int ret = NET_XMIT_DROP; struct dst_entry *dst; struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) + goto err; + + iph = ipv6_hdr(skb); + + memset(&fl6, 0, sizeof(fl6)); + /* needed to match OIF rule */ + fl6.flowi6_oif = dev->ifindex; + fl6.flowi6_iif = LOOPBACK_IFINDEX; + fl6.daddr = iph->daddr; + fl6.saddr = iph->saddr; + fl6.flowlabel = ip6_flowinfo(iph); + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = iph->nexthdr; + fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; + dst = ip6_route_output(net, NULL, &fl6); if (dst == dst_null) goto err; @@ -237,21 +243,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk, static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, struct net_device *vrf_dev) { - struct iphdr *ip4h = ip_hdr(skb); + struct iphdr *ip4h; int ret = NET_XMIT_DROP; - struct flowi4 fl4 = { - /* needed to match OIF rule */ - .flowi4_oif 
= vrf_dev->ifindex, - .flowi4_iif = LOOPBACK_IFINDEX, - .flowi4_tos = RT_TOS(ip4h->tos), - .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF, - .flowi4_proto = ip4h->protocol, - .daddr = ip4h->daddr, - .saddr = ip4h->saddr, - }; + struct flowi4 fl4; struct net *net = dev_net(vrf_dev); struct rtable *rt; + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) + goto err; + + ip4h = ip_hdr(skb); + + memset(&fl4, 0, sizeof(fl4)); + /* needed to match OIF rule */ + fl4.flowi4_oif = vrf_dev->ifindex; + fl4.flowi4_iif = LOOPBACK_IFINDEX; + fl4.flowi4_tos = RT_TOS(ip4h->tos); + fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF; + fl4.flowi4_proto = ip4h->protocol; + fl4.daddr = ip4h->daddr; + fl4.saddr = ip4h->saddr; + rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index d436cc51dfd1..2fb4258941a5 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -177,6 +177,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_get_cfg }, { @@ -184,6 +185,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_set_cfg }, { @@ -192,6 +194,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_get_selected }, { @@ -200,6 +203,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_set_selected }, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c index f6500899fc14..d07e7c7355d9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c @@ -112,6 +112,7 @@ const struct wiphy_vendor_command brcmf_vendor_cmds[] = { }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .policy = VENDOR_CMD_RAW_DATA, .doit = brcmf_cfg80211_vndr_cmds_dcmd_handler }, }; diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index 5cf0b32c413b..e1bd344c4ebc 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -163,6 +163,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = { .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = wlcore_vendor_cmd_smart_config_start, + .policy = wlcore_vendor_attr_policy, }, { .info = { @@ -172,6 +173,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = { .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = wlcore_vendor_cmd_smart_config_stop, + .policy = wlcore_vendor_attr_policy, }, { .info = { @@ -181,6 +183,7 
@@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = { .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = wlcore_vendor_cmd_smart_config_set_group_key, + .policy = wlcore_vendor_attr_policy, }, }; diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 62d00fffa4af..3508a79110c7 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -62,14 +62,14 @@ static ssize_t sector_size_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, btt_lbasize_supported); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -91,11 +91,11 @@ static ssize_t uuid_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -120,13 +120,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -138,14 +138,14 @@ static ssize_t size_show(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) rc = sprintf(buf, "%llu\n", nd_btt->size); else { /* no size to convey if the btt instance is disabled */ rc = -ENXIO; } - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 2dca3034fee0..798c5c4aea9c 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -26,7 +26,7 @@ int nvdimm_major; static int nvdimm_bus_major; -static struct class *nd_class; +struct class *nd_class; static DEFINE_IDA(nd_ida); static int to_nd_device_type(struct device *dev) @@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) { nvdimm_bus_lock(&nvdimm_bus->dev); if (--nvdimm_bus->probe_active == 0) - wake_up(&nvdimm_bus->probe_wait); + wake_up(&nvdimm_bus->wait); nvdimm_bus_unlock(&nvdimm_bus->dev); } @@ -91,7 +91,10 @@ static int nvdimm_bus_probe(struct device *dev) dev->driver->name, dev_name(dev)); nvdimm_bus_probe_start(nvdimm_bus); + debug_nvdimm_lock(dev); rc = nd_drv->probe(dev); + debug_nvdimm_unlock(dev); + if (rc == 0) nd_region_probe_success(nvdimm_bus, dev); else @@ -113,8 +116,11 @@ static int nvdimm_bus_remove(struct device *dev) struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); int rc = 0; - if (nd_drv->remove) + if (nd_drv->remove) { + debug_nvdimm_lock(dev); rc = nd_drv->remove(dev); + debug_nvdimm_unlock(dev); + } nd_region_disable(nvdimm_bus, dev); dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name, @@ -140,7 +146,7 @@ static void nvdimm_bus_shutdown(struct device *dev) void nd_device_notify(struct device *dev, enum nvdimm_event event) { - device_lock(dev); + nd_device_lock(dev); if (dev->driver) { struct nd_device_driver *nd_drv; @@ -148,7 +154,7 @@ void nd_device_notify(struct device 
*dev, enum nvdimm_event event) if (nd_drv->notify) nd_drv->notify(dev, event); } - device_unlock(dev); + nd_device_unlock(dev); } EXPORT_SYMBOL(nd_device_notify); @@ -296,7 +302,7 @@ static void nvdimm_bus_release(struct device *dev) kfree(nvdimm_bus); } -static bool is_nvdimm_bus(struct device *dev) +bool is_nvdimm_bus(struct device *dev) { return dev->release == nvdimm_bus_release; } @@ -341,7 +347,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent, return NULL; INIT_LIST_HEAD(&nvdimm_bus->list); INIT_LIST_HEAD(&nvdimm_bus->mapping_list); - init_waitqueue_head(&nvdimm_bus->probe_wait); + init_waitqueue_head(&nvdimm_bus->wait); nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); if (nvdimm_bus->id < 0) { kfree(nvdimm_bus); @@ -426,6 +432,9 @@ static int nd_bus_remove(struct device *dev) list_del_init(&nvdimm_bus->list); mutex_unlock(&nvdimm_bus_list_mutex); + wait_event(nvdimm_bus->wait, + atomic_read(&nvdimm_bus->ioctl_active) == 0); + nd_synchronize(); device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); @@ -547,13 +556,38 @@ EXPORT_SYMBOL(nd_device_register); void nd_device_unregister(struct device *dev, enum nd_async_mode mode) { + bool killed; + switch (mode) { case ND_ASYNC: + /* + * In the async case this is being triggered with the + * device lock held and the unregistration work needs to + * be moved out of line iff this is thread has won the + * race to schedule the deletion. + */ + if (!kill_device(dev)) + return; + get_device(dev); async_schedule_domain(nd_async_device_unregister, dev, &nd_async_domain); break; case ND_SYNC: + /* + * In the sync case the device is being unregistered due + * to a state change of the parent. Claim the kill state + * to synchronize against other unregistration requests, + * or otherwise let the async path handle it if the + * unregistration was already queued. + */ + nd_device_lock(dev); + killed = kill_device(dev); + nd_device_unlock(dev); + + if (!killed) + return; + nd_synchronize(); device_unregister(dev); break; @@ -859,10 +893,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev) do { if (nvdimm_bus->probe_active == 0) break; - nvdimm_bus_unlock(&nvdimm_bus->dev); - wait_event(nvdimm_bus->probe_wait, + nvdimm_bus_unlock(dev); + nd_device_unlock(dev); + wait_event(nvdimm_bus->wait, nvdimm_bus->probe_active == 0); - nvdimm_bus_lock(&nvdimm_bus->dev); + nd_device_lock(dev); + nvdimm_bus_lock(dev); } while (true); } @@ -945,20 +981,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, int read_only, unsigned int ioctl_cmd, unsigned long arg) { struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; - static char out_env[ND_CMD_MAX_ENVELOPE]; - static char in_env[ND_CMD_MAX_ENVELOPE]; const struct nd_cmd_desc *desc = NULL; unsigned int cmd = _IOC_NR(ioctl_cmd); struct device *dev = &nvdimm_bus->dev; void __user *p = (void __user *) arg; + char *out_env = NULL, *in_env = NULL; const char *cmd_name, *dimm_name; u32 in_len = 0, out_len = 0; unsigned int func = cmd; unsigned long cmd_mask; struct nd_cmd_pkg pkg; int rc, i, cmd_rc; + void *buf = NULL; u64 buf_len = 0; - void *buf; if (nvdimm) { desc = nd_cmd_dimm_desc(cmd); @@ -989,7 +1024,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, case ND_CMD_ARS_START: case ND_CMD_CLEAR_ERROR: case ND_CMD_CALL: - dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n", + dev_dbg(dev, "'%s' command while read-only.\n", nvdimm ? 
nvdimm_cmd_name(cmd) : nvdimm_bus_cmd_name(cmd)); return -EPERM; @@ -998,6 +1033,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an input envelope */ + in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + if (!in_env) + return -ENOMEM; for (i = 0; i < desc->in_num; i++) { u32 in_size, copy; @@ -1005,14 +1043,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (in_size == UINT_MAX) { dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", __func__, dimm_name, cmd_name, i); - return -ENXIO; + rc = -ENXIO; + goto out; } - if (in_len < sizeof(in_env)) - copy = min_t(u32, sizeof(in_env) - in_len, in_size); + if (in_len < ND_CMD_MAX_ENVELOPE) + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); else copy = 0; - if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) - return -EFAULT; + if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) { + rc = -EFAULT; + goto out; + } in_len += in_size; } @@ -1024,6 +1065,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an output envelope */ + out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + if (!out_env) { + rc = -ENOMEM; + goto out; + } + for (i = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, (u32 *) in_env, (u32 *) out_env, 0); @@ -1032,15 +1079,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (out_size == UINT_MAX) { dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", dimm_name, cmd_name, i); - return -EFAULT; + rc = -EFAULT; + goto out; } - if (out_len < sizeof(out_env)) - copy = min_t(u32, sizeof(out_env) - out_len, out_size); + if (out_len < ND_CMD_MAX_ENVELOPE) + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); else copy = 0; if (copy && copy_from_user(&out_env[out_len], - p + in_len + out_len, copy)) - return -EFAULT; + p + in_len + out_len, copy)) { + rc = -EFAULT; + goto out; + } out_len += out_size; } @@ -1048,19 +1098,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (buf_len > ND_IOCTL_MAX_BUFLEN) { dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); - return -EINVAL; + rc = -EINVAL; + goto out; } buf = vmalloc(buf_len); - if (!buf) - return -ENOMEM; + if (!buf) { + rc = -ENOMEM; + goto out; + } if (copy_from_user(buf, p, buf_len)) { rc = -EFAULT; goto out; } - nvdimm_bus_lock(&nvdimm_bus->dev); + nd_device_lock(dev); + nvdimm_bus_lock(dev); rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); if (rc) goto out_unlock; @@ -1075,39 +1129,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address, clear_err->cleared); } - nvdimm_bus_unlock(&nvdimm_bus->dev); if (copy_to_user(p, buf, buf_len)) rc = -EFAULT; - vfree(buf); - return rc; - - out_unlock: - nvdimm_bus_unlock(&nvdimm_bus->dev); - out: +out_unlock: + nvdimm_bus_unlock(dev); + nd_device_unlock(dev); +out: + kfree(in_env); + kfree(out_env); vfree(buf); return rc; } -static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - long id = (long) file->private_data; - int rc = -ENXIO, ro; - struct nvdimm_bus *nvdimm_bus; - - ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); - mutex_lock(&nvdimm_bus_list_mutex); - list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { - if (nvdimm_bus->id == id) { - rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg); - break; - } - 
} - mutex_unlock(&nvdimm_bus_list_mutex); - - return rc; -} +enum nd_ioctl_mode { + BUS_IOCTL, + DIMM_IOCTL, +}; static int match_dimm(struct device *dev, void *data) { @@ -1122,31 +1161,62 @@ static int match_dimm(struct device *dev, void *data) return 0; } -static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg, + enum nd_ioctl_mode mode) + { - int rc = -ENXIO, ro; - struct nvdimm_bus *nvdimm_bus; + struct nvdimm_bus *nvdimm_bus, *found = NULL; + long id = (long) file->private_data; + struct nvdimm *nvdimm = NULL; + int rc, ro; ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); mutex_lock(&nvdimm_bus_list_mutex); list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { - struct device *dev = device_find_child(&nvdimm_bus->dev, - file->private_data, match_dimm); - struct nvdimm *nvdimm; - - if (!dev) - continue; + if (mode == DIMM_IOCTL) { + struct device *dev; + + dev = device_find_child(&nvdimm_bus->dev, + file->private_data, match_dimm); + if (!dev) + continue; + nvdimm = to_nvdimm(dev); + found = nvdimm_bus; + } else if (nvdimm_bus->id == id) { + found = nvdimm_bus; + } - nvdimm = to_nvdimm(dev); - rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); - put_device(dev); - break; + if (found) { + atomic_inc(&nvdimm_bus->ioctl_active); + break; + } } mutex_unlock(&nvdimm_bus_list_mutex); + if (!found) + return -ENXIO; + + nvdimm_bus = found; + rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); + + if (nvdimm) + put_device(&nvdimm->dev); + if (atomic_dec_and_test(&nvdimm_bus->ioctl_active)) + wake_up(&nvdimm_bus->wait); + return rc; } +static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return nd_ioctl(file, cmd, arg, BUS_IOCTL); +} + +static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return nd_ioctl(file, cmd, arg, DIMM_IOCTL); +} + static int nd_open(struct inode *inode, struct file *file) { long minor = iminor(inode); @@ -1158,16 +1228,16 @@ static int nd_open(struct inode *inode, struct file *file) static const struct file_operations nvdimm_bus_fops = { .owner = THIS_MODULE, .open = nd_open, - .unlocked_ioctl = nd_ioctl, - .compat_ioctl = nd_ioctl, + .unlocked_ioctl = bus_ioctl, + .compat_ioctl = bus_ioctl, .llseek = noop_llseek, }; static const struct file_operations nvdimm_fops = { .owner = THIS_MODULE, .open = nd_open, - .unlocked_ioctl = nvdimm_ioctl, - .compat_ioctl = nvdimm_ioctl, + .unlocked_ioctl = dimm_ioctl, + .compat_ioctl = dimm_ioctl, .llseek = noop_llseek, }; diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 5e1f060547bf..9204f1e9fd14 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -246,7 +246,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf, * * Enforce that uuids can only be changed while the device is disabled * (driver detached) - * LOCKING: expects device_lock() is held on entry + * LOCKING: expects nd_device_lock() is held on entry */ int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, size_t len) @@ -347,15 +347,15 @@ static DEVICE_ATTR_RO(provider); static int flush_namespaces(struct device *dev, void *data) { - device_lock(dev); - device_unlock(dev); + nd_device_lock(dev); + nd_device_unlock(dev); return 0; } static int flush_regions_dimms(struct device *dev, void *data) { - device_lock(dev); - device_unlock(dev); + nd_device_lock(dev); + nd_device_unlock(dev); device_for_each_child(dev, NULL, flush_namespaces); return 
0; } diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index dfecd6e17043..29a065e769ea 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -484,12 +484,12 @@ static ssize_t security_store(struct device *dev, * done while probing is idle and the DIMM is not in active use * in any region. */ - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __security_store(dev, buf, len); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 2d8d7e554877..a16e52251a30 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -410,7 +410,7 @@ static ssize_t alt_name_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __alt_name_store(dev, buf, len); @@ -418,7 +418,7 @@ static ssize_t alt_name_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? rc : len; } @@ -1077,7 +1077,7 @@ static ssize_t size_store(struct device *dev, if (rc) return rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __size_store(dev, val); @@ -1103,7 +1103,7 @@ static ssize_t size_store(struct device *dev, dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? rc : len; } @@ -1286,7 +1286,7 @@ static ssize_t uuid_store(struct device *dev, } else return -ENXIO; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); if (to_ndns(dev)->claim) @@ -1302,7 +1302,7 @@ static ssize_t uuid_store(struct device *dev, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? rc : len; } @@ -1376,7 +1376,7 @@ static ssize_t sector_size_store(struct device *dev, } else return -ENXIO; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); if (to_ndns(dev)->claim) rc = -EBUSY; @@ -1387,7 +1387,7 @@ static ssize_t sector_size_store(struct device *dev, dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -1502,9 +1502,9 @@ static ssize_t holder_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -1541,7 +1541,7 @@ static ssize_t holder_class_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __holder_class_store(dev, buf); @@ -1549,7 +1549,7 @@ static ssize_t holder_class_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? 
rc : len; } @@ -1560,7 +1560,7 @@ static ssize_t holder_class_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (ndns->claim_class == NVDIMM_CCLASS_NONE) rc = sprintf(buf, "\n"); else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || @@ -1572,7 +1572,7 @@ static ssize_t holder_class_show(struct device *dev, rc = sprintf(buf, "dax\n"); else rc = sprintf(buf, "<unknown>\n"); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -1586,7 +1586,7 @@ static ssize_t mode_show(struct device *dev, char *mode; ssize_t rc; - device_lock(dev); + nd_device_lock(dev); claim = ndns->claim; if (claim && is_nd_btt(claim)) mode = "safe"; @@ -1599,7 +1599,7 @@ static ssize_t mode_show(struct device *dev, else mode = "raw"; rc = sprintf(buf, "%s\n", mode); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -1703,8 +1703,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) * Flush any in-progess probes / removals in the driver * for the raw personality of this namespace. */ - device_lock(&ndns->dev); - device_unlock(&ndns->dev); + nd_device_lock(&ndns->dev); + nd_device_unlock(&ndns->dev); if (ndns->dev.driver) { dev_dbg(&ndns->dev, "is active, can't bind %s\n", dev_name(dev)); diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 391e88de3a29..0ac52b6eb00e 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -9,6 +9,7 @@ #include <linux/sizes.h> #include <linux/mutex.h> #include <linux/nd.h> +#include "nd.h" extern struct list_head nvdimm_bus_list; extern struct mutex nvdimm_bus_list_mutex; @@ -17,10 +18,11 @@ extern struct workqueue_struct *nvdimm_wq; struct nvdimm_bus { struct nvdimm_bus_descriptor *nd_desc; - wait_queue_head_t probe_wait; + wait_queue_head_t wait; struct list_head list; struct device dev; int id, probe_active; + atomic_t ioctl_active; struct list_head mapping_list; struct mutex reconfig_mutex; struct badrange badrange; @@ -181,4 +183,71 @@ ssize_t nd_namespace_store(struct device *dev, struct nd_namespace_common **_ndns, const char *buf, size_t len); struct nd_pfn *to_nd_pfn_safe(struct device *dev); +bool is_nvdimm_bus(struct device *dev); + +#ifdef CONFIG_PROVE_LOCKING +extern struct class *nd_class; + +enum { + LOCK_BUS, + LOCK_NDCTL, + LOCK_REGION, + LOCK_DIMM = LOCK_REGION, + LOCK_NAMESPACE, + LOCK_CLAIM, +}; + +static inline void debug_nvdimm_lock(struct device *dev) +{ + if (is_nd_region(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION); + else if (is_nvdimm(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM); + else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM); + else if (dev->parent && (is_nd_region(dev->parent))) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE); + else if (is_nvdimm_bus(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS); + else if (dev->class && dev->class == nd_class) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL); + else + dev_WARN(dev, "unknown lock level\n"); +} + +static inline void debug_nvdimm_unlock(struct device *dev) +{ + mutex_unlock(&dev->lockdep_mutex); +} + +static inline void nd_device_lock(struct device *dev) +{ + device_lock(dev); + debug_nvdimm_lock(dev); +} + +static inline void nd_device_unlock(struct device *dev) +{ + debug_nvdimm_unlock(dev); + device_unlock(dev); +} +#else +static inline void nd_device_lock(struct device *dev) +{ + device_lock(dev); +} + +static inline void 
nd_device_unlock(struct device *dev) +{ + device_unlock(dev); +} + +static inline void debug_nvdimm_lock(struct device *dev) +{ +} + +static inline void debug_nvdimm_unlock(struct device *dev) +{ +} +#endif #endif /* __ND_CORE_H__ */ diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index df2bdbd22450..3e7b11cf1aae 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -67,7 +67,7 @@ static ssize_t mode_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc = 0; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); if (dev->driver) rc = -EBUSY; @@ -89,7 +89,7 @@ static ssize_t mode_store(struct device *dev, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -132,14 +132,14 @@ static ssize_t align_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_size_select_store(dev, buf, &nd_pfn->align, nd_pfn_supported_alignments()); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -161,11 +161,11 @@ static ssize_t uuid_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -190,13 +190,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? 
"" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -208,7 +208,7 @@ static ssize_t resource_show(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) { struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; u64 offset = __le64_to_cpu(pfn_sb->dataoff); @@ -222,7 +222,7 @@ static ssize_t resource_show(struct device *dev, /* no address to convey if the pfn instance is disabled */ rc = -ENXIO; } - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -234,7 +234,7 @@ static ssize_t size_show(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) { struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; u64 offset = __le64_to_cpu(pfn_sb->dataoff); @@ -250,7 +250,7 @@ static ssize_t size_show(struct device *dev, /* no size to convey if the pfn instance is disabled */ rc = -ENXIO; } - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 2bf3acd69613..4c121dd03dd9 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -522,8 +522,8 @@ static int nd_pmem_remove(struct device *dev) nvdimm_namespace_detach_btt(to_nd_btt(dev)); else { /* - * Note, this assumes device_lock() context to not race - * nd_pmem_notify() + * Note, this assumes nd_device_lock() context to not + * race nd_pmem_notify() */ sysfs_put(pmem->bb_state); pmem->bb_state = NULL; diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index ef46cc3a71ae..37bf8719a2a4 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev) if (rc) return rc; - rc = nd_region_register_namespaces(nd_region, &err); - if (rc < 0) - return rc; - - ndrd = dev_get_drvdata(dev); - ndrd->ns_active = rc; - ndrd->ns_count = rc + err; - - if (rc && err && rc == err) - return -ENODEV; - if (is_nd_pmem(&nd_region->dev)) { struct resource ndr_res; @@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev) nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res); } + rc = nd_region_register_namespaces(nd_region, &err); + if (rc < 0) + return rc; + + ndrd = dev_get_drvdata(dev); + ndrd->ns_active = rc; + ndrd->ns_count = rc + err; + + if (rc && err && rc == err) + return -ENODEV; + nd_region->btt_seed = nd_btt_create(nd_region); nd_region->pfn_seed = nd_pfn_create(nd_region); nd_region->dax_seed = nd_dax_create(nd_region); @@ -102,7 +102,7 @@ static int nd_region_remove(struct device *dev) nvdimm_bus_unlock(dev); /* - * Note, this assumes device_lock() context to not race + * Note, this assumes nd_device_lock() context to not race * nd_region_notify() */ sysfs_put(nd_region->bb_state); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 56f2227f192a..af30cbe7a8ea 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -331,7 +331,7 @@ static ssize_t set_cookie_show(struct device *dev, * the v1.1 namespace label cookie definition. To read all this * data we need to wait for probing to settle. 
*/ - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); if (nd_region->ndr_mappings) { @@ -348,7 +348,7 @@ static ssize_t set_cookie_show(struct device *dev, } } nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); if (rc) return rc; @@ -424,10 +424,12 @@ static ssize_t available_size_show(struct device *dev, * memory nvdimm_bus_lock() is dropped, but that's userspace's * problem to not race itself. */ + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_available_dpa(nd_region); nvdimm_bus_unlock(dev); + nd_device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -439,10 +441,12 @@ static ssize_t max_available_extent_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); unsigned long long available = 0; + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_allocatable_dpa(nd_region); nvdimm_bus_unlock(dev); + nd_device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -561,12 +565,12 @@ static ssize_t region_badblocks_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) rc = badblocks_show(&nd_region->bb, buf, 0); else rc = -ENXIO; - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index cc09b81fc7f4..8f3fbe5ca937 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2311,17 +2311,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); } -static void __nvme_release_subsystem(struct nvme_subsystem *subsys) +static void nvme_release_subsystem(struct device *dev) { + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + ida_simple_remove(&nvme_subsystems_ida, subsys->instance); kfree(subsys); } -static void nvme_release_subsystem(struct device *dev) -{ - __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); -} - static void nvme_destroy_subsystem(struct kref *ref) { struct nvme_subsystem *subsys = @@ -2477,7 +2475,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) mutex_lock(&nvme_subsystems_lock); found = __nvme_find_get_subsystem(subsys->subnqn); if (found) { - __nvme_release_subsystem(subsys); + put_device(&subsys->dev); subsys = found; if (!nvme_validate_cntlid(subsys, ctrl, id)) { diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index a9a927677970..4f0d0d12744e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -12,11 +12,6 @@ module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); -inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) -{ - return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3)); -} - /* * If multipathing is enabled we need to always use the subsystem instance * number for numbering our devices to avoid conflicts between subsystems that @@ -622,7 +617,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { int error; - if (!nvme_ctrl_use_ana(ctrl)) + /* check if multipath is enabled and we have the capability */ + if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3))) return 0; ctrl->anacap = id->anacap; diff --git a/drivers/nvme/host/nvme.h 
b/drivers/nvme/host/nvme.h index 716a876119c8..26b563f9985b 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -485,7 +485,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct block_device_operations nvme_ns_head_ops; #ifdef CONFIG_NVME_MULTIPATH -bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl); +static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +{ + return ctrl->ana_log_buf != NULL; +} + void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); void nvme_failover_req(struct request *req); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bb970ca82517..db160cee42ad 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2254,9 +2254,7 @@ static int nvme_dev_add(struct nvme_dev *dev) if (!dev->ctrl.tagset) { dev->tagset.ops = &nvme_mq_ops; dev->tagset.nr_hw_queues = dev->online_queues - 1; - dev->tagset.nr_maps = 1; /* default */ - if (dev->io_queues[HCTX_TYPE_READ]) - dev->tagset.nr_maps++; + dev->tagset.nr_maps = 2; /* default + read */ if (dev->io_queues[HCTX_TYPE_POLL]) dev->tagset.nr_maps++; dev->tagset.timeout = NVME_IO_TIMEOUT; @@ -3029,6 +3027,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 2d06b8095a19..df352b334ea7 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, cpu_pm_pmu_setup(armpmu, cmd); break; case CPU_PM_EXIT: - cpu_pm_pmu_setup(armpmu, cmd); case CPU_PM_ENTER_FAILED: + cpu_pm_pmu_setup(armpmu, cmd); armpmu->start(armpmu); break; default: diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 48d6f0d87583..83ed1fbf73cf 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = { }; MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match); +static const struct spi_device_id olpc_xo175_ec_id_table[] = { + { "xo1.75-ec", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table); + static struct spi_driver olpc_xo175_ec_spi_driver = { .driver = { .name = "olpc-xo175-ec", diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 235c0b89f824..c510d0d72475 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), + INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), {} }; diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index b0d3110ae378..e4c68efac0c2 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = { static struct gpio_keys_button apu2_keys_buttons[] = { { - .code = KEY_SETUP, + .code = 
KEY_RESTART, .active_low = 1, .desc = "front button", .type = EV_KEY, @@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); MODULE_ALIAS("platform:pcengines-apuv2"); -MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME); -MODULE_SOFTDEP("pre: platform:leds-gpio"); -MODULE_SOFTDEP("pre: platform:gpio_keys_polled"); +MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled"); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 9fd6dd342169..6df481896b5f 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1454,7 +1454,7 @@ static void __exit rapl_exit(void) unregister_pm_notifier(&rapl_pm_notifier); } -module_init(rapl_init); +fs_initcall(rapl_init); module_exit(rapl_exit); MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code"); diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 540e8aafc990..f808c5fa9838 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -671,7 +671,7 @@ static int __init powercap_init(void) return class_register(&powercap_class); } -device_initcall(powercap_init); +fs_initcall(powercap_init); MODULE_DESCRIPTION("PowerCap sysfs Driver"); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index b9ce93e9df89..99f86612f775 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr) char msg_format; char msg_no; + /* + * intrc values ENODEV, ENOLINK and EPERM + * will be optained from sleep_on to indicate that no + * IO operation can be started + */ + if (cqr->intrc == -ENODEV) + return 1; + + if (cqr->intrc == -ENOLINK) + return 1; + + if (cqr->intrc == -EPERM) + return 1; + sense = dasd_get_sense(&cqr->irb); if (!sense) return 0; @@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device, lcu->flags &= ~NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); - do { - rc = dasd_sleep_on(cqr); - if (rc && suborder_not_supported(cqr)) - return -EOPNOTSUPP; - } while (rc && (cqr->retries > 0)); - if (rc) { + rc = dasd_sleep_on(cqr); + if (rc && !suborder_not_supported(cqr)) { spin_lock_irqsave(&lcu->lock, flags); lcu->flags |= NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 8c9d412b6d33..e7cf0a1d4f71 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, } if (dstat == 0x08) break; + /* else, fall through */ case 0x04: /* Device end interrupt. */ if ((raw = req->info) == NULL) diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 8d3370da2dfc..3e0b2f63a9d2 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev) switch (device->tape_state) { case TS_INIT: tape_state_set(device, TS_NOT_OPER); + /* fallthrough */ case TS_NOT_OPER: /* * Nothing to do. 
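
The fall-through annotations being added to the s390 drivers in this range (con3215, tape_core, and further on ap_queue and zcrypt_msgtype6) change no control flow; they only mark intentional case fall-through so that compilers which warn about implicit fall-through (for example GCC's -Wimplicit-fallthrough) stay quiet. A minimal standalone sketch of the pattern, with purely illustrative case values and work per case:

/* Sketch only: the values and the work done per case are illustrative,
 * not the drivers' logic. */
int count_stages(int status)
{
	int stages = 0;

	switch (status) {
	case 0x08:
		stages++;
		/* fall through - the 0x04 handling must run as well */
	case 0x04:
		stages++;
		break;
	default:
		break;
	}
	return stages;
}
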
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request) break; if (device->tape_state == TS_UNUSED) break; + /* fallthrough */ default: if (device->tape_state == TS_BLKUSE) break; @@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) case -ETIMEDOUT: DBF_LH(1, "(%08x): Request timed out\n", device->cdev_id); + /* fallthrough */ case -EIO: __tape_end_request(device, request, -EIO); break; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 730c4e68094b..4142c85e77d8 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -319,9 +319,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, int retries = 0, cc; unsigned long laob = 0; - WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) || - !q->u.out.use_cq)); - if (q->u.out.use_cq && aob != 0) { + if (aob) { fc = QDIO_SIGA_WRITEQ; laob = aob; } @@ -621,9 +619,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, { unsigned long phys_aob = 0; - if (!q->use_cq) - return 0; - if (!q->aobs[bufnr]) { struct qaob *aob = qdio_allocate_aob(); q->aobs[bufnr] = aob; @@ -1308,6 +1303,8 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) for_each_output_queue(irq_ptr, q, i) { if (use_cq) { + if (multicast_outbound(q)) + continue; if (qdio_enable_async_operation(&q->u.out) < 0) { use_cq = 0; continue; @@ -1553,18 +1550,19 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, /* One SIGA-W per buffer required for unicast HSI */ WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); - phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); + if (q->u.out.use_cq) + phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); rc = qdio_kick_outbound_q(q, phys_aob); } else if (need_siga_sync(q)) { rc = qdio_siga_sync_q(q); + } else if (count < QDIO_MAX_BUFFERS_PER_Q && + get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && + state == SLSB_CU_OUTPUT_PRIMED) { + /* The previous buffer is not processed yet, tack on. 
*/ + qperf_inc(q, fast_requeue); } else { - /* try to fast requeue buffers */ - get_buf_state(q, prev_buf(bufnr), &state, 0); - if (state != SLSB_CU_OUTPUT_PRIMED) - rc = qdio_kick_outbound_q(q, 0); - else - qperf_inc(q, fast_requeue); + rc = qdio_kick_outbound_q(q, 0); } /* in case of SIGA errors we must process the error immediately */ diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c index 8c1d2357ef5b..7a838e3d7c0f 100644 --- a/drivers/s390/cio/vfio_ccw_async.c +++ b/drivers/s390/cio/vfio_ccw_async.c @@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private, } -const struct vfio_ccw_regops vfio_ccw_async_region_ops = { +static const struct vfio_ccw_regops vfio_ccw_async_region_ops = { .read = vfio_ccw_async_region_read, .write = vfio_ccw_async_region_write, .release = vfio_ccw_async_region_release, diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 1d4c893ead23..3645d1720c4b 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -72,8 +72,10 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len) sizeof(*pa->pa_iova_pfn) + sizeof(*pa->pa_pfn), GFP_KERNEL); - if (unlikely(!pa->pa_iova_pfn)) + if (unlikely(!pa->pa_iova_pfn)) { + pa->pa_nr = 0; return -ENOMEM; + } pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; @@ -421,7 +423,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain, static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) { struct ccwchain *chain; - int len; + int len, ret; /* Copy 2K (the most we support today) of possible CCWs */ len = copy_from_iova(cp->mdev, cp->guest_cp, cda, @@ -448,7 +450,12 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); /* Loop for tics on this new chain. */ - return ccwchain_loop_tic(chain, cp); + ret = ccwchain_loop_tic(chain, cp); + + if (ret) + ccwchain_free(chain); + + return ret; } /* Loop for TICs. */ @@ -642,17 +649,16 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) /* Build a ccwchain for the first CCW segment */ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp); - if (ret) - cp_free(cp); - - /* It is safe to force: if not set but idals used - * ccwchain_calc_length returns an error. - */ - cp->orb.cmd.c64 = 1; - if (!ret) + if (!ret) { cp->initialized = true; + /* It is safe to force: if it was not set but idals used + * ccwchain_calc_length would have returned an error. 
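
The two vfio_ccw_cp fixes above apply the same rule to failed setup paths: leave the object in a state its release code can handle. pfn_array_alloc() now zeroes pa_nr when the allocation fails, and ccwchain_handle_ccw() frees the chain it just built if the TIC loop fails instead of leaking it. A standalone sketch of the first case, with invented names rather than the driver's API:

#include <stdlib.h>

struct example_array {
	unsigned long *entries;
	int nr;			/* length the release path will walk */
};

int example_array_alloc(struct example_array *a, int nr)
{
	a->nr = nr;
	a->entries = calloc(nr, sizeof(*a->entries));
	if (!a->entries) {
		a->nr = 0;	/* keep a later release from using a stale count */
		return -1;
	}
	return 0;
}
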
+ */ + cp->orb.cmd.c64 = 1; + } + return ret; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 2b90a5ecaeb9..9208c0e56c33 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); if (scsw_is_solicited(&irb->scsw)) { cp_update_scsw(&private->cp, &irb->scsw); - if (is_final) + if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) cp_free(&private->cp); } mutex_lock(&private->io_mutex); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 5ea83dc4f1d7..dad2be333d82 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) ap_msg->receive(aq, ap_msg, aq->reply); break; } + /* fall through */ case AP_RESPONSE_NO_PENDING_REPLY: if (!status.queue_empty || aq->queue_count <= 0) break; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 12fe9deb265e..a36251d138fb 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq, if (msg->cprbx.cprb_ver_id == 0x02) return convert_type86_ica(zq, reply, outputdata, outputdatalength); - /* - * Fall through, no break, incorrect cprb version is an unknown - * response - */ + /* fall through - wrong cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; pr_err("Cryptographic device %02x.%04x failed and was set offline\n", @@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq, } if (msg->cprbx.cprb_ver_id == 0x02) return convert_type86_xcrb(zq, reply, xcRB); - /* - * Fall through, no break, incorrect cprb version is an unknown - * response - */ + /* fall through - wrong cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ zq->online = 0; @@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq, return convert_error(zq, reply); if (msg->cprbx.cprb_ver_id == 0x04) return convert_type86_ep11_xcrb(zq, reply, xcRB); - /* Fall through, no break, incorrect cprb version is an unknown resp.*/ + /* fall through - wrong cprb version is an unknown resp */ default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; pr_err("Cryptographic device %02x.%04x failed and was set offline\n", @@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq, return -EINVAL; if (msg->cprbx.cprb_ver_id == 0x02) return convert_type86_rng(zq, reply, data); - /* - * Fall through, no break, incorrect cprb version is an unknown - * response - */ + /* fall through - wrong cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; pr_err("Cryptographic device %02x.%04x failed and was set offline\n", diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 1a55e5942d36..957889a42d2e 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -145,6 +145,8 @@ struct airq_info { struct airq_iv *aiv; }; static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; +static DEFINE_MUTEX(airq_areas_lock); + static u8 *summary_indicators; static inline u8 
*get_summary_indicator(struct airq_info *info) @@ -265,9 +267,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, unsigned long bit, flags; for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { + mutex_lock(&airq_areas_lock); if (!airq_areas[i]) airq_areas[i] = new_airq_info(i); info = airq_areas[i]; + mutex_unlock(&airq_areas_lock); if (!info) return 0; write_lock_irqsave(&info->lock, flags); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 75f66f8ad3ea..1b92f3c19ff3 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1523,10 +1523,10 @@ config SCSI_VIRTIO source "drivers/scsi/csiostor/Kconfig" -endif # SCSI_LOWLEVEL - source "drivers/scsi/pcmcia/Kconfig" +endif # SCSI_LOWLEVEL + source "drivers/scsi/device_handler/Kconfig" endmenu diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index f0066f8a1786..4971104b1817 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -40,6 +40,7 @@ #define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_RETRIES 5 #define ALUA_RTPG_DELAY_MSECS 5 +#define ALUA_RTPG_RETRY_DELAY 2 /* device handler flags */ #define ALUA_OPTIMIZE_STPG 0x01 @@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) case SCSI_ACCESS_STATE_TRANSITIONING: if (time_before(jiffies, pg->expiry)) { /* State transition, retry */ - pg->interval = 2; + pg->interval = ALUA_RTPG_RETRY_DELAY; err = SCSI_DH_RETRY; } else { struct alua_dh_data *h; @@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work) spin_lock_irqsave(&pg->lock, flags); pg->flags &= ~ALUA_PG_RUNNING; pg->flags |= ALUA_PG_RUN_RTPG; + if (!pg->interval) + pg->interval = ALUA_RTPG_RETRY_DELAY; spin_unlock_irqrestore(&pg->lock, flags); queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); @@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work) spin_lock_irqsave(&pg->lock, flags); if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { pg->flags &= ~ALUA_PG_RUNNING; + if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) + pg->interval = ALUA_RTPG_RETRY_DELAY; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); queue_delayed_work(kaluad_wq, &pg->rtpg_work, diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 590ec8009f52..1791a393795d 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -1019,7 +1019,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fcoe_fcf *fcf; struct fcoe_fcf new; - unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); + unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV); int first = 0; int mtu_valid; int found = 0; @@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); */ static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) { - return (struct fcoe_rport *)(rdata + 1); + return container_of(rdata, struct fcoe_rport, rdata); } /** @@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) */ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, struct sk_buff *skb, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { struct fip_header *fiph; struct fip_desc *desc = NULL; @@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, struct fip_wwn_desc *wwn = NULL; struct fip_vn_desc *vn = NULL; struct fip_size_desc *size = NULL; - struct fcoe_rport *frport; size_t 
rlen; size_t dlen; u32 desc_mask = 0; u32 dtype; u8 sub; - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); - frport = fcoe_ctlr_rport(rdata); - fiph = (struct fip_header *)skb->data; frport->flags = ntohs(fiph->fip_flags); @@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); break; case FIP_DT_VN_ID: if (dlen != sizeof(struct fip_vn_desc)) goto len_err; vn = (struct fip_vn_desc *)desc; memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); - rdata->ids.port_id = ntoh24(vn->fd_fc_id); - rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); + frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id); + frport->rdata.ids.port_name = + get_unaligned_be64(&vn->fd_wwpn); break; case FIP_DT_FC4F: if (dlen != sizeof(struct fip_fc4_feat)) @@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) /** * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. * @fip: The FCoE controller - * @rdata: parsed remote port with frport from the probe request + * @frport: parsed FCoE rport from the probe request * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { - struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); - - if (rdata->ids.port_id != fip->port_id) + if (frport->rdata.ids.port_id != fip->port_id) return; switch (fip->state) { @@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, * Probe's REC bit is not set. * If we don't reply, we will change our address. */ - if (fip->lp->wwpn > rdata->ids.port_name && + if (fip->lp->wwpn > frport->rdata.ids.port_name && !(frport->flags & FIP_FL_REC_OR_P2P)) { LIBFCOE_FIP_DBG(fip, "vn_probe_req: " "port_id collision\n"); @@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. * @fip: The FCoE controller - * @rdata: parsed remote port with frport from the probe request + * @frport: parsed FCoE rport from the probe request * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { - if (rdata->ids.port_id != fip->port_id) + if (frport->rdata.ids.port_id != fip->port_id) return; switch (fip->state) { case FIP_ST_VNMP_START: @@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. * @fip: The FCoE controller - * @new: newly-parsed remote port with frport as a template for new rdata + * @new: newly-parsed FCoE rport as a template for new rdata * * Called with ctlr_mutex held. 
*/ -static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) +static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new) { struct fc_lport *lport = fip->lp; struct fc_rport_priv *rdata; @@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) struct fcoe_rport *frport; u32 port_id; - port_id = new->ids.port_id; + port_id = new->rdata.ids.port_id; if (port_id == fip->port_id) return; @@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) rdata->disc_id = lport->disc.disc_id; ids = &rdata->ids; - if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || - (ids->node_name != -1 && ids->node_name != new->ids.node_name)) { + if ((ids->port_name != -1 && + ids->port_name != new->rdata.ids.port_name) || + (ids->node_name != -1 && + ids->node_name != new->rdata.ids.node_name)) { mutex_unlock(&rdata->rp_mutex); LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id); fc_rport_logoff(rdata); mutex_lock(&rdata->rp_mutex); } - ids->port_name = new->ids.port_name; - ids->node_name = new->ids.node_name; + ids->port_name = new->rdata.ids.port_name; + ids->node_name = new->rdata.ids.node_name; mutex_unlock(&rdata->rp_mutex); frport = fcoe_ctlr_rport(rdata); LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n", port_id, frport->fcoe_len ? "old" : "new", rdata->rp_state); - *frport = *fcoe_ctlr_rport(new); + frport->fcoe_len = new->fcoe_len; + frport->flags = new->flags; + frport->login_count = new->login_count; + memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN); + memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN); frport->time = 0; } @@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) /** * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification * @fip: The FCoE controller - * @new: newly-parsed remote port with frport as a template for new rdata + * @new: newly-parsed FCoE rport as a template for new rdata * * Called with ctlr_mutex held. 
*/ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) + struct fcoe_rport *new) { - struct fcoe_rport *frport = fcoe_ctlr_rport(new); - - if (frport->flags & FIP_FL_REC_OR_P2P) { + if (new->flags & FIP_FL_REC_OR_P2P) { LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; @@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, case FIP_ST_VNMP_START: case FIP_ST_VNMP_PROBE1: case FIP_ST_VNMP_PROBE2: - if (new->ids.port_id == fip->port_id) { + if (new->rdata.ids.port_id == fip->port_id) { LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " "restart, state %d\n", fip->state); @@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, break; case FIP_ST_VNMP_CLAIM: case FIP_ST_VNMP_UP: - if (new->ids.port_id == fip->port_id) { - if (new->ids.port_name > fip->lp->wwpn) { + if (new->rdata.ids.port_id == fip->port_id) { + if (new->rdata.ids.port_name > fip->lp->wwpn) { LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " "restart, port_id collision\n"); fcoe_ctlr_vn_restart(fip); @@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, break; } LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n", - new->ids.port_id); - fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, - min((u32)frport->fcoe_len, + new->rdata.ids.port_id); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac, + min((u32)new->fcoe_len, fcoe_ctlr_fcoe_size(fip))); fcoe_ctlr_vn_add(fip, new); break; default: LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " - "ignoring claim from %x\n", new->ids.port_id); + "ignoring claim from %x\n", + new->rdata.ids.port_id); break; } } @@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_claim_resp() - handle received Claim Response * @fip: The FCoE controller that received the frame - * @new: newly-parsed remote port with frport from the Claim Response + * @new: newly-parsed FCoE rport from the Claim Response * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) + struct fcoe_rport *new) { LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", - new->ids.port_id, fcoe_ctlr_state(fip->state)); + new->rdata.ids.port_id, fcoe_ctlr_state(fip->state)); if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) fcoe_ctlr_vn_add(fip, new); } @@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_beacon() - handle received beacon. * @fip: The FCoE controller that received the frame - * @new: newly-parsed remote port with frport from the Beacon + * @new: newly-parsed FCoE rport from the Beacon * * Called with ctlr_mutex held. 
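
All of the fcoe_ctlr churn in this range follows from one layout change: struct fcoe_rport now embeds its struct fc_rport_priv as the new rdata member, placed at the start of the structure so the single buffer that fc_rport_create() sizes via rport_priv_size can serve as both types, and fcoe_ctlr_rport() recovers the wrapper with container_of() instead of the old "(struct fcoe_rport *)(rdata + 1)" arithmetic, which silently depended on the private area being allocated directly behind fc_rport_priv. A self-contained sketch of the recovery step, using simplified stand-in types rather than the real libfc/fcoe structures:

#include <stddef.h>

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_rport_priv {
	unsigned long port_id;
};

struct example_fcoe_rport {
	struct example_rport_priv rdata;	/* embedded, as in the patch */
	unsigned long time;
	unsigned short fcoe_len;
};

struct example_fcoe_rport *example_ctlr_rport(struct example_rport_priv *rdata)
{
	/* Valid for any member offset; no hidden assumption about how much
	 * memory was allocated behind the fc_rport_priv-equivalent object. */
	return example_container_of(rdata, struct example_fcoe_rport, rdata);
}
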
*/ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) + struct fcoe_rport *new) { struct fc_lport *lport = fip->lp; struct fc_rport_priv *rdata; struct fcoe_rport *frport; - frport = fcoe_ctlr_rport(new); - if (frport->flags & FIP_FL_REC_OR_P2P) { + if (new->flags & FIP_FL_REC_OR_P2P) { LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } - rdata = fc_rport_lookup(lport, new->ids.port_id); + rdata = fc_rport_lookup(lport, new->rdata.ids.port_id); if (rdata) { - if (rdata->ids.node_name == new->ids.node_name && - rdata->ids.port_name == new->ids.port_name) { + if (rdata->ids.node_name == new->rdata.ids.node_name && + rdata->ids.port_name == new->rdata.ids.port_name) { frport = fcoe_ctlr_rport(rdata); + LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n", rdata->ids.port_id); if (!frport->time && fip->state == FIP_ST_VNMP_UP) { @@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, * Don't add the neighbor yet. */ LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", - new->ids.port_id); + new->rdata.ids.port_id); if (time_after(jiffies, fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) fcoe_ctlr_vn_send_claim(fip); @@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vn2vn_subcode sub; - struct { - struct fc_rport_priv rdata; - struct fcoe_rport frport; - } buf; + struct fcoe_rport frport = { }; int rc, vlan_id = 0; fiph = (struct fip_header *)skb->data; @@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; } - rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); + rc = fcoe_ctlr_vn_parse(fip, skb, &frport); if (rc) { LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); goto drop; @@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) mutex_lock(&fip->ctlr_mutex); switch (sub) { case FIP_SC_VN_PROBE_REQ: - fcoe_ctlr_vn_probe_req(fip, &buf.rdata); + fcoe_ctlr_vn_probe_req(fip, &frport); break; case FIP_SC_VN_PROBE_REP: - fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); + fcoe_ctlr_vn_probe_reply(fip, &frport); break; case FIP_SC_VN_CLAIM_NOTIFY: - fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); + fcoe_ctlr_vn_claim_notify(fip, &frport); break; case FIP_SC_VN_CLAIM_REP: - fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); + fcoe_ctlr_vn_claim_resp(fip, &frport); break; case FIP_SC_VN_BEACON: - fcoe_ctlr_vn_beacon(fip, &buf.rdata); + fcoe_ctlr_vn_beacon(fip, &frport); break; default: LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); @@ -2802,22 +2800,18 @@ drop: */ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, struct sk_buff *skb, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { struct fip_header *fiph; struct fip_desc *desc = NULL; struct fip_mac_desc *macd = NULL; struct fip_wwn_desc *wwn = NULL; - struct fcoe_rport *frport; size_t rlen; size_t dlen; u32 desc_mask = 0; u32 dtype; u8 sub; - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); - frport = fcoe_ctlr_rport(rdata); - fiph = (struct fip_header *)skb->data; frport->flags = ntohs(fiph->fip_flags); @@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); break; default: 
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " @@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. * @fip: The FCoE controller + * @frport: The newly-parsed FCoE rport from the Discovery Request * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { - struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; if (fip->mode == FIP_MODE_VN2VN) @@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vlan_subcode sub; - struct { - struct fc_rport_priv rdata; - struct fcoe_rport frport; - } buf; + struct fcoe_rport frport = { }; int rc; fiph = (struct fip_header *)skb->data; sub = fiph->fip_subcode; - rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); + rc = fcoe_ctlr_vlan_parse(fip, skb, &frport); if (rc) { LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); goto drop; } mutex_lock(&fip->ctlr_mutex); if (sub == FIP_SC_VL_REQ) - fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); + fcoe_ctlr_vlan_disc_reply(fip, &frport); mutex_unlock(&fip->ctlr_mutex); drop: diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 43a6b5350775..1bb6aada93fa 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_SERV_RESPONSE_COMPLETE: switch (c2->error_data.status) { case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + if (cmd) + cmd->result = 0; break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: cmd->result |= SAM_STAT_CHECK_CONDITION; @@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h, /* check for good status */ if (likely(c2->error_data.serv_response == 0 && - c2->error_data.status == 0)) + c2->error_data.status == 0)) { + cmd->result = 0; return hpsa_cmd_free_and_done(h, c, cmd); + } /* * Any RAID offload error results in retry which will use @@ -5654,6 +5658,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) return SCSI_MLQUEUE_DEVICE_BUSY; /* + * This is necessary because the SML doesn't zero out this field during + * error recovery. + */ + cmd->result = 0; + + /* * Call alternate submit routine for I/O accelerated commands. * Retries always go down the normal I/O path. 
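
The hpsa changes beginning above all chase the same stale-status bug: the SCSI midlayer does not zero scsi_cmnd->result on every reuse (the new comment in hpsa_scsi_queue_command singles out error recovery), so the driver now clears it itself before dispatch and on successful ioaccel completions; otherwise a leftover error code could be reported for an I/O that completed cleanly. Reduced to a sketch, with field and function names that are illustrative only:

struct example_cmd {
	int result;		/* status from the previous use may linger here */
};

void example_queue_command(struct example_cmd *cmd)
{
	cmd->result = 0;	/* do not rely on the caller having cleared it */
	/* ... build and submit the request, setting result only on error ... */
}
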
*/ @@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, if (idx != h->last_collision_tag) { /* Print once per tag */ dev_warn(&h->pdev->dev, "%s: tag collision (tag=%d)\n", __func__, idx); - if (c->scsi_cmd != NULL) - scsi_print_command(c->scsi_cmd); if (scmd) scsi_print_command(scmd); h->last_collision_tag = idx; @@ -7798,7 +7806,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h) hpsa_disable_interrupt_mode(h); /* pci_init 2 */ /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.rst + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); /* pci_init 1 */ pci_release_regions(h->pdev); /* pci_init 2 */ @@ -7881,7 +7889,7 @@ clean2: /* intmode+region, pci */ clean1: /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.rst + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index acd16e0d52cf..8cdbac076a1b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev) spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_purge_requests(vhost, DID_ERROR); - ibmvfc_free_event_pool(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_free_event_pool(vhost); ibmvfc_free_mem(vhost); spin_lock(&ibmvfc_driver_lock); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index e0f3852fdad1..da6e97d8dc3b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup); struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) { struct fc_rport_priv *rdata; + size_t rport_priv_size = sizeof(*rdata); lockdep_assert_held(&lport->disc.disc_mutex); @@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) if (rdata) return rdata; - rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL); + if (lport->rport_priv_size > 0) + rport_priv_size = lport->rport_priv_size; + rdata = kzalloc(rport_priv_size, GFP_KERNEL); if (!rdata) return NULL; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b2339d04a700..f9f07935556e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3163,6 +3163,7 @@ fw_crash_buffer_show(struct device *cdev, (struct megasas_instance *) shost->hostdata; u32 size; unsigned long dmachunk = CRASH_DMA_BUF_SIZE; + unsigned long chunk_left_bytes; unsigned long src_addr; unsigned long flags; u32 buff_offset; @@ -3186,6 +3187,8 @@ fw_crash_buffer_show(struct device *cdev, } size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; + chunk_left_bytes = dmachunk - (buff_offset % dmachunk); + size = (size > chunk_left_bytes) ? chunk_left_bytes : size; size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + @@ -8763,7 +8766,7 @@ static int __init megasas_init(void) if ((event_log_level < MFI_EVT_CLASS_DEBUG) || (event_log_level > MFI_EVT_CLASS_DEAD)) { - printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); + pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); event_log_level = MFI_EVT_CLASS_CRITICAL; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a32b3f0fcd15..120e3c4de8c2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -537,7 +537,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) return 0; } -int +static int megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) { u32 max_mpt_cmd, i, j; @@ -576,7 +576,8 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) return 0; } -int + +static int megasas_alloc_request_fusion(struct megasas_instance *instance) { struct fusion_context *fusion; @@ -657,7 +658,7 @@ retry_alloc: return 0; } -int +static int megasas_alloc_reply_fusion(struct megasas_instance *instance) { int i, count; @@ -734,7 +735,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance) return 0; } -int +static int megasas_alloc_rdpq_fusion(struct megasas_instance *instance) { int i, j, k, msix_count; @@ -916,7 +917,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) { * and is used as SMID of the cmd. * SMID value range is from 1 to max_fw_cmds. */ -int +static int megasas_alloc_cmds_fusion(struct megasas_instance *instance) { int i; @@ -1736,7 +1737,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance) * * This is the main function for initializing firmware. 
*/ -u32 +static u32 megasas_init_adapter_fusion(struct megasas_instance *instance) { struct fusion_context *fusion; @@ -1962,7 +1963,7 @@ megasas_fusion_stop_watchdog(struct megasas_instance *instance) * @ext_status : ext status of cmd returned by FW */ -void +static void map_cmd_status(struct fusion_context *fusion, struct scsi_cmnd *scmd, u8 status, u8 ext_status, u32 data_length, u8 *sense) @@ -2375,7 +2376,7 @@ int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, * * Used to set the PD LBA in CDB for FP IOs */ -void +static void megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) @@ -2714,7 +2715,7 @@ megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion, * Prepares the io_request and chain elements (sg_frame) for IO * The IO can be for PD (Fast Path) or LD */ -void +static void megasas_build_ldio_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) @@ -3211,7 +3212,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, * Invokes helper functions to prepare request frames * and sets flags appropriate for IO/Non-IO cmd */ -int +static int megasas_build_io_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) @@ -3325,9 +3326,9 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) /* megasas_prepate_secondRaid1_IO * It prepares the raid 1 second IO */ -void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, - struct megasas_cmd_fusion *cmd, - struct megasas_cmd_fusion *r1_cmd) +static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, + struct megasas_cmd_fusion *r1_cmd) { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; struct fusion_context *fusion; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 684662888792..050c0f029ef9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { u64 required_mask, coherent_mask; struct sysinfo s; + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ + int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 
63 : 64; if (ioc->is_mcpu_endpoint) goto try_32bit; @@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) goto try_32bit; if (ioc->dma_mask) - coherent_mask = DMA_BIT_MASK(64); + coherent_mask = DMA_BIT_MASK(dma_mask); else coherent_mask = DMA_BIT_MASK(32); - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || dma_set_coherent_mask(&pdev->dev, coherent_mask)) goto try_32bit; ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); - ioc->dma_mask = 64; + ioc->dma_mask = dma_mask; goto out; try_32bit: @@ -2744,7 +2746,7 @@ static int _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) return -ENODEV; } @@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) total_sz += sz; } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); - if (ioc->dma_mask == 64) { + if (ioc->dma_mask > 32) { if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { ioc_warn(ioc, "no suitable consistent DMA mask for %s\n", pci_name(ioc->pdev)); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4059655639d9..da83034d4759 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) ql_log(ql_log_warn, vha, 0xd049, "Failed to allocate ct_sns request.\n"); kfree(fcport); - fcport = NULL; + return NULL; } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9381171c2fc0..11e64b50497f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1784,8 +1784,10 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); } - shost->max_sectors = min_t(unsigned int, shost->max_sectors, - dma_max_mapping_size(dev) << SECTOR_SHIFT); + if (dev->dma_mask) { + shost->max_sectors = min_t(unsigned int, shost->max_sectors, + dma_max_mapping_size(dev) >> SECTOR_SHIFT); + } blk_queue_max_hw_sectors(q, shost->max_sectors); if (shost->unchecked_isa_dma) blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 62c6ba17991a..c9519e62308c 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -419,7 +419,7 @@ static void qe_upload_microcode(const void *base, /* * Upload a microcode to the I-RAM at a specific address. * - * See Documentation/powerpc/qe_firmware.txt for information on QE microcode + * See Documentation/powerpc/qe_firmware.rst for information on QE microcode * uploading. 
* * Currently, only version 1 is supported, so the 'version' field must be diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 22dd4c457d6a..c70caf4ea490 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -875,10 +875,12 @@ static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port) return 0; if (caps & DCB_CAP_DCBX_VER_IEEE) { - iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; - + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM; ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); - + if (!ret) { + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; + ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); + } } else if (caps & DCB_CAP_DCBX_VER_CEE) { iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 343b129c2cfa..e877b917c15f 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -589,7 +589,8 @@ static void cxgbit_dcb_workfn(struct work_struct *work) iscsi_app = &dcb_work->dcb_app; if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { - if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY) + if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) && + (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)) goto out; priority = iscsi_app->app.priority; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index 213ab3cc6b80..d3446acf9bbd 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -487,6 +487,7 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev, rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep); if (ret < 0) { powercap_unregister_control_type(rapl_mmio_priv.control_type); + rapl_mmio_priv.control_type = NULL; return ret; } rapl_mmio_priv.pcap_rapl_online = ret; @@ -496,6 +497,9 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev, static void proc_thermal_rapl_remove(void) { + if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type)) + return; + cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online); powercap_unregister_control_type(rapl_mmio_priv.control_type); } diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c index cb4db1b3ca3c..5fb214e67d73 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c @@ -47,7 +47,7 @@ * using the 2.6 Linux kernel kref construct. * * For direction on installation and usage of this driver please reference - * Documentation/powerpc/hvcs.txt. + * Documentation/powerpc/hvcs.rst. */ #include <linux/device.h> diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index fd385c8c53a5..3083dbae35f7 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1035,25 +1035,6 @@ config SERIAL_VT8500_CONSOLE depends on SERIAL_VT8500=y select SERIAL_CORE_CONSOLE -config SERIAL_NETX - tristate "NetX serial port support" - depends on ARCH_NETX - select SERIAL_CORE - help - If you have a machine based on a Hilscher NetX SoC you - can enable its onboard serial port by enabling this option. - - To compile this driver as a module, choose M here: the - module will be called netx-serial. 
- -config SERIAL_NETX_CONSOLE - bool "Console on NetX serial port" - depends on SERIAL_NETX=y - select SERIAL_CORE_CONSOLE - help - If you have enabled the serial port on the Hilscher NetX SoC - you can make it the console by answering Y to this option. - config SERIAL_OMAP tristate "OMAP serial port support" depends on ARCH_OMAP2PLUS diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 7cd7cabfa6c4..15a0fccadf7e 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -59,7 +59,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o -obj-$(CONFIG_SERIAL_NETX) += netx-serial.o obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c deleted file mode 100644 index b3556863491f..000000000000 --- a/drivers/tty/serial/netx-serial.c +++ /dev/null @@ -1,733 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -#define SUPPORT_SYSRQ -#endif - -#include <linux/device.h> -#include <linux/module.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/console.h> -#include <linux/sysrq.h> -#include <linux/platform_device.h> -#include <linux/tty.h> -#include <linux/tty_flip.h> -#include <linux/serial_core.h> -#include <linux/serial.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <mach/hardware.h> -#include <mach/netx-regs.h> - -/* We've been assigned a range on the "Low-density serial ports" major */ -#define SERIAL_NX_MAJOR 204 -#define MINOR_START 170 - -enum uart_regs { - UART_DR = 0x00, - UART_SR = 0x04, - UART_LINE_CR = 0x08, - UART_BAUDDIV_MSB = 0x0c, - UART_BAUDDIV_LSB = 0x10, - UART_CR = 0x14, - UART_FR = 0x18, - UART_IIR = 0x1c, - UART_ILPR = 0x20, - UART_RTS_CR = 0x24, - UART_RTS_LEAD = 0x28, - UART_RTS_TRAIL = 0x2c, - UART_DRV_ENABLE = 0x30, - UART_BRM_CR = 0x34, - UART_RXFIFO_IRQLEVEL = 0x38, - UART_TXFIFO_IRQLEVEL = 0x3c, -}; - -#define SR_FE (1<<0) -#define SR_PE (1<<1) -#define SR_BE (1<<2) -#define SR_OE (1<<3) - -#define LINE_CR_BRK (1<<0) -#define LINE_CR_PEN (1<<1) -#define LINE_CR_EPS (1<<2) -#define LINE_CR_STP2 (1<<3) -#define LINE_CR_FEN (1<<4) -#define LINE_CR_5BIT (0<<5) -#define LINE_CR_6BIT (1<<5) -#define LINE_CR_7BIT (2<<5) -#define LINE_CR_8BIT (3<<5) -#define LINE_CR_BITS_MASK (3<<5) - -#define CR_UART_EN (1<<0) -#define CR_SIREN (1<<1) -#define CR_SIRLP (1<<2) -#define CR_MSIE (1<<3) -#define CR_RIE (1<<4) -#define CR_TIE (1<<5) -#define CR_RTIE (1<<6) -#define CR_LBE (1<<7) - -#define FR_CTS (1<<0) -#define FR_DSR (1<<1) -#define FR_DCD (1<<2) -#define FR_BUSY (1<<3) -#define FR_RXFE (1<<4) -#define FR_TXFF (1<<5) -#define FR_RXFF (1<<6) -#define FR_TXFE (1<<7) - -#define IIR_MIS (1<<0) -#define IIR_RIS (1<<1) -#define IIR_TIS (1<<2) -#define IIR_RTIS (1<<3) -#define IIR_MASK 0xf - -#define RTS_CR_AUTO (1<<0) -#define RTS_CR_RTS (1<<1) -#define RTS_CR_COUNT (1<<2) -#define RTS_CR_MOD2 (1<<3) -#define RTS_CR_RTS_POL (1<<4) -#define RTS_CR_CTS_CTR (1<<5) -#define RTS_CR_CTS_POL (1<<6) -#define RTS_CR_STICK (1<<7) - -#define UART_PORT_SIZE 0x40 -#define DRIVER_NAME "netx-uart" - -struct netx_port { - struct uart_port port; -}; - -static void netx_stop_tx(struct 
uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val & ~CR_TIE, port->membase + UART_CR); -} - -static void netx_stop_rx(struct uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val & ~CR_RIE, port->membase + UART_CR); -} - -static void netx_enable_ms(struct uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val | CR_MSIE, port->membase + UART_CR); -} - -static inline void netx_transmit_buffer(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - - if (port->x_char) { - writel(port->x_char, port->membase + UART_DR); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { - netx_stop_tx(port); - return; - } - - do { - /* send xmit->buf[xmit->tail] - * out the port here */ - writel(xmit->buf[xmit->tail], port->membase + UART_DR); - xmit->tail = (xmit->tail + 1) & - (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (!(readl(port->membase + UART_FR) & FR_TXFF)); - - if (uart_circ_empty(xmit)) - netx_stop_tx(port); -} - -static void netx_start_tx(struct uart_port *port) -{ - writel( - readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR); - - if (!(readl(port->membase + UART_FR) & FR_TXFF)) - netx_transmit_buffer(port); -} - -static unsigned int netx_tx_empty(struct uart_port *port) -{ - return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT; -} - -static void netx_txint(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - netx_stop_tx(port); - return; - } - - netx_transmit_buffer(port); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); -} - -static void netx_rxint(struct uart_port *port, unsigned long *flags) -{ - unsigned char rx, flg, status; - - while (!(readl(port->membase + UART_FR) & FR_RXFE)) { - rx = readl(port->membase + UART_DR); - flg = TTY_NORMAL; - port->icount.rx++; - status = readl(port->membase + UART_SR); - if (status & SR_BE) { - writel(0, port->membase + UART_SR); - if (uart_handle_break(port)) - continue; - } - - if (unlikely(status & (SR_FE | SR_PE | SR_OE))) { - - if (status & SR_PE) - port->icount.parity++; - else if (status & SR_FE) - port->icount.frame++; - if (status & SR_OE) - port->icount.overrun++; - - status &= port->read_status_mask; - - if (status & SR_BE) - flg = TTY_BREAK; - else if (status & SR_PE) - flg = TTY_PARITY; - else if (status & SR_FE) - flg = TTY_FRAME; - } - - if (uart_handle_sysrq_char(port, rx)) - continue; - - uart_insert_char(port, status, SR_OE, rx, flg); - } - - spin_unlock_irqrestore(&port->lock, *flags); - tty_flip_buffer_push(&port->state->port); - spin_lock_irqsave(&port->lock, *flags); -} - -static irqreturn_t netx_int(int irq, void *dev_id) -{ - struct uart_port *port = dev_id; - unsigned long flags; - unsigned char status; - - spin_lock_irqsave(&port->lock,flags); - - status = readl(port->membase + UART_IIR) & IIR_MASK; - while (status) { - if (status & IIR_RIS) - netx_rxint(port, &flags); - if (status & IIR_TIS) - netx_txint(port); - if (status & IIR_MIS) { - if (readl(port->membase + UART_FR) & FR_CTS) - uart_handle_cts_change(port, 1); - else - uart_handle_cts_change(port, 0); - } - writel(0, port->membase + UART_IIR); - status = readl(port->membase + UART_IIR) & IIR_MASK; - } - - spin_unlock_irqrestore(&port->lock,flags); - return IRQ_HANDLED; -} - -static unsigned 
int netx_get_mctrl(struct uart_port *port) -{ - unsigned int ret = TIOCM_DSR | TIOCM_CAR; - - if (readl(port->membase + UART_FR) & FR_CTS) - ret |= TIOCM_CTS; - - return ret; -} - -static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl) -{ - unsigned int val; - - /* FIXME: Locking needed ? */ - if (mctrl & TIOCM_RTS) { - val = readl(port->membase + UART_RTS_CR); - writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); - } -} - -static void netx_break_ctl(struct uart_port *port, int break_state) -{ - unsigned int line_cr; - spin_lock_irq(&port->lock); - - line_cr = readl(port->membase + UART_LINE_CR); - if (break_state != 0) - line_cr |= LINE_CR_BRK; - else - line_cr &= ~LINE_CR_BRK; - writel(line_cr, port->membase + UART_LINE_CR); - - spin_unlock_irq(&port->lock); -} - -static int netx_startup(struct uart_port *port) -{ - int ret; - - ret = request_irq(port->irq, netx_int, 0, - DRIVER_NAME, port); - if (ret) { - dev_err(port->dev, "unable to grab irq%d\n",port->irq); - goto exit; - } - - writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN, - port->membase + UART_LINE_CR); - - writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN, - port->membase + UART_CR); - -exit: - return ret; -} - -static void netx_shutdown(struct uart_port *port) -{ - writel(0, port->membase + UART_CR) ; - - free_irq(port->irq, port); -} - -static void -netx_set_termios(struct uart_port *port, struct ktermios *termios, - struct ktermios *old) -{ - unsigned int baud, quot; - unsigned char old_cr; - unsigned char line_cr = LINE_CR_FEN; - unsigned char rts_cr = 0; - - switch (termios->c_cflag & CSIZE) { - case CS5: - line_cr |= LINE_CR_5BIT; - break; - case CS6: - line_cr |= LINE_CR_6BIT; - break; - case CS7: - line_cr |= LINE_CR_7BIT; - break; - case CS8: - line_cr |= LINE_CR_8BIT; - break; - } - - if (termios->c_cflag & CSTOPB) - line_cr |= LINE_CR_STP2; - - if (termios->c_cflag & PARENB) { - line_cr |= LINE_CR_PEN; - if (!(termios->c_cflag & PARODD)) - line_cr |= LINE_CR_EPS; - } - - if (termios->c_cflag & CRTSCTS) - rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL; - - baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); - quot = baud * 4096; - quot /= 1000; - quot *= 256; - quot /= 100000; - - spin_lock_irq(&port->lock); - - uart_update_timeout(port, termios->c_cflag, baud); - - old_cr = readl(port->membase + UART_CR); - - /* disable interrupts */ - writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE), - port->membase + UART_CR); - - /* drain transmitter */ - while (readl(port->membase + UART_FR) & FR_BUSY); - - /* disable UART */ - writel(old_cr & ~CR_UART_EN, port->membase + UART_CR); - - /* modem status interrupts */ - old_cr &= ~CR_MSIE; - if (UART_ENABLE_MS(port, termios->c_cflag)) - old_cr |= CR_MSIE; - - writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB); - writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB); - writel(line_cr, port->membase + UART_LINE_CR); - - writel(rts_cr, port->membase + UART_RTS_CR); - - /* - * Characters to ignore - */ - port->ignore_status_mask = 0; - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= SR_PE; - if (termios->c_iflag & IGNBRK) { - port->ignore_status_mask |= SR_BE; - /* - * If we're ignoring parity and break indicators, - * ignore overruns too (for real raw support). 
- */ - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= SR_PE; - } - - port->read_status_mask = 0; - if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - port->read_status_mask |= SR_BE; - if (termios->c_iflag & INPCK) - port->read_status_mask |= SR_PE | SR_FE; - - writel(old_cr, port->membase + UART_CR); - - spin_unlock_irq(&port->lock); -} - -static const char *netx_type(struct uart_port *port) -{ - return port->type == PORT_NETX ? "NETX" : NULL; -} - -static void netx_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, UART_PORT_SIZE); -} - -static int netx_request_port(struct uart_port *port) -{ - return request_mem_region(port->mapbase, UART_PORT_SIZE, - DRIVER_NAME) != NULL ? 0 : -EBUSY; -} - -static void netx_config_port(struct uart_port *port, int flags) -{ - if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0) - port->type = PORT_NETX; -} - -static int -netx_verify_port(struct uart_port *port, struct serial_struct *ser) -{ - int ret = 0; - - if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX) - ret = -EINVAL; - - return ret; -} - -static struct uart_ops netx_pops = { - .tx_empty = netx_tx_empty, - .set_mctrl = netx_set_mctrl, - .get_mctrl = netx_get_mctrl, - .stop_tx = netx_stop_tx, - .start_tx = netx_start_tx, - .stop_rx = netx_stop_rx, - .enable_ms = netx_enable_ms, - .break_ctl = netx_break_ctl, - .startup = netx_startup, - .shutdown = netx_shutdown, - .set_termios = netx_set_termios, - .type = netx_type, - .release_port = netx_release_port, - .request_port = netx_request_port, - .config_port = netx_config_port, - .verify_port = netx_verify_port, -}; - -static struct netx_port netx_ports[] = { - { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART0), - .mapbase = NETX_PA_UART0, - .irq = NETX_IRQ_UART0, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 0, - }, - }, { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART1), - .mapbase = NETX_PA_UART1, - .irq = NETX_IRQ_UART1, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 1, - }, - }, { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART2), - .mapbase = NETX_PA_UART2, - .irq = NETX_IRQ_UART2, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 2, - }, - } -}; - -#ifdef CONFIG_SERIAL_NETX_CONSOLE - -static void netx_console_putchar(struct uart_port *port, int ch) -{ - while (readl(port->membase + UART_FR) & FR_BUSY); - writel(ch, port->membase + UART_DR); -} - -static void -netx_console_write(struct console *co, const char *s, unsigned int count) -{ - struct uart_port *port = &netx_ports[co->index].port; - unsigned char cr_save; - - cr_save = readl(port->membase + UART_CR); - writel(cr_save | CR_UART_EN, port->membase + UART_CR); - - uart_console_write(port, s, count, netx_console_putchar); - - while (readl(port->membase + UART_FR) & FR_BUSY); - writel(cr_save, port->membase + UART_CR); -} - -static void __init -netx_console_get_options(struct uart_port *port, int *baud, - int *parity, int *bits, int *flow) -{ - unsigned char line_cr; - - *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) | - readl(port->membase + UART_BAUDDIV_LSB); - *baud *= 1000; - *baud /= 4096; - *baud *= 1000; - *baud /= 256; - *baud *= 100; - - line_cr = readl(port->membase + UART_LINE_CR); - 
*parity = 'n'; - if (line_cr & LINE_CR_PEN) { - if (line_cr & LINE_CR_EPS) - *parity = 'e'; - else - *parity = 'o'; - } - - switch (line_cr & LINE_CR_BITS_MASK) { - case LINE_CR_8BIT: - *bits = 8; - break; - case LINE_CR_7BIT: - *bits = 7; - break; - case LINE_CR_6BIT: - *bits = 6; - break; - case LINE_CR_5BIT: - *bits = 5; - break; - } - - if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO) - *flow = 'r'; -} - -static int __init -netx_console_setup(struct console *co, char *options) -{ - struct netx_port *sport; - int baud = 9600; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. - */ - if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports)) - co->index = 0; - sport = &netx_ports[co->index]; - - if (options) { - uart_parse_options(options, &baud, &parity, &bits, &flow); - } else { - /* if the UART is enabled, assume it has been correctly setup - * by the bootloader and get the options - */ - if (readl(sport->port.membase + UART_CR) & CR_UART_EN) { - netx_console_get_options(&sport->port, &baud, - &parity, &bits, &flow); - } - - } - - return uart_set_options(&sport->port, co, baud, parity, bits, flow); -} - -static struct uart_driver netx_reg; -static struct console netx_console = { - .name = "ttyNX", - .write = netx_console_write, - .device = uart_console_device, - .setup = netx_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &netx_reg, -}; - -static int __init netx_console_init(void) -{ - register_console(&netx_console); - return 0; -} -console_initcall(netx_console_init); - -#define NETX_CONSOLE &netx_console -#else -#define NETX_CONSOLE NULL -#endif - -static struct uart_driver netx_reg = { - .owner = THIS_MODULE, - .driver_name = DRIVER_NAME, - .dev_name = "ttyNX", - .major = SERIAL_NX_MAJOR, - .minor = MINOR_START, - .nr = ARRAY_SIZE(netx_ports), - .cons = NETX_CONSOLE, -}; - -static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_suspend_port(&netx_reg, &sport->port); - - return 0; -} - -static int serial_netx_resume(struct platform_device *pdev) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_resume_port(&netx_reg, &sport->port); - - return 0; -} - -static int serial_netx_probe(struct platform_device *pdev) -{ - struct uart_port *port = &netx_ports[pdev->id].port; - - dev_info(&pdev->dev, "initialising\n"); - - port->dev = &pdev->dev; - - writel(1, port->membase + UART_RXFIFO_IRQLEVEL); - uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port); - platform_set_drvdata(pdev, &netx_ports[pdev->id]); - - return 0; -} - -static int serial_netx_remove(struct platform_device *pdev) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_remove_one_port(&netx_reg, &sport->port); - - return 0; -} - -static struct platform_driver serial_netx_driver = { - .probe = serial_netx_probe, - .remove = serial_netx_remove, - - .suspend = serial_netx_suspend, - .resume = serial_netx_resume, - - .driver = { - .name = DRIVER_NAME, - }, -}; - -static int __init netx_serial_init(void) -{ - int ret; - - printk(KERN_INFO "Serial: NetX driver\n"); - - ret = uart_register_driver(&netx_reg); - if (ret) - return ret; - - ret = platform_driver_register(&serial_netx_driver); - if (ret != 0) - uart_unregister_driver(&netx_reg); - - return 0; -} - -static void __exit 
netx_serial_exit(void) -{ - platform_driver_unregister(&serial_netx_driver); - uart_unregister_driver(&netx_reg); -} - -module_init(netx_serial_init); -module_exit(netx_serial_exit); - -MODULE_AUTHOR("Sascha Hauer"); -MODULE_DESCRIPTION("NetX serial port driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 717292c1c0df..60ff236a3d63 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem) list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { tsk = waiter->task; - smp_mb(); - waiter->task = NULL; + smp_store_release(&waiter->task, NULL); wake_up_process(tsk); put_task_struct(tsk); } @@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout) for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); - if (!waiter.task) + if (!smp_load_acquire(&waiter.task)) break; if (!timeout) break; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index ec92f36ab5c4..34aa39d1aed9 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -3771,7 +3771,11 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr, char *buf) { struct con_driver *con = dev_get_drvdata(dev); - int bind = con_is_bound(con->con); + int bind; + + console_lock(); + bind = con_is_bound(con->con); + console_unlock(); return snprintf(buf, PAGE_SIZE, "%i\n", bind); } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 88533938ce19..9320787ac2e6 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -3052,8 +3052,8 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, local_mem = devm_memremap(hcd->self.sysdev, phys_addr, size, MEMREMAP_WC); - if (!local_mem) - return -ENOMEM; + if (IS_ERR(local_mem)) + return PTR_ERR(local_mem); /* * Here we pass a dma_addr_t but the arg type is a phys_addr_t. 
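Regarding the usb_hcd_setup_local_mem() hunk above: devm_memremap() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so a NULL check misses real errors; the change propagates the encoded errno with PTR_ERR(). A minimal sketch of that convention follows; the wrapper function and its name are hypothetical, and only the IS_ERR()/PTR_ERR() handling reflects the patch:

/* Illustrative sketch only (not the actual hcd.c code): map a region and
 * propagate the ERR_PTR-encoded errno instead of treating the return value
 * as NULL-on-error. */
static int example_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys, size_t size)
{
	void *local_mem;

	local_mem = devm_memremap(hcd->self.sysdev, phys, size, MEMREMAP_WC);
	if (IS_ERR(local_mem))
		return PTR_ERR(local_mem);	/* encoded errno, e.g. -ENXIO */

	return 0;
}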
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index fe9422d3bcdc..b0882c13a1d1 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -149,7 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) break; case PCI_VENDOR_ID_AMD: /* AMD PLL quirk */ - if (usb_amd_find_chipset_info()) + if (usb_amd_quirk_pll_check()) ehci->amd_pll_fix = 1; /* AMD8111 EHCI doesn't work, according to AMD errata */ if (pdev->device == 0x7463) { @@ -186,7 +186,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) break; case PCI_VENDOR_ID_ATI: /* AMD PLL quirk */ - if (usb_amd_find_chipset_info()) + if (usb_amd_quirk_pll_check()) ehci->amd_pll_fix = 1; /* diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 09a8ebd95588..6968b9f2b76b 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -159,7 +159,7 @@ out: return result; error_set_cluster_id: - wusb_cluster_id_put(wusbhc->cluster_id); + wusb_cluster_id_put(addr); error_cluster_id_get: goto out; diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index a033f7d855e0..f4e13a3fddee 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -152,7 +152,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); - if (usb_amd_find_chipset_info()) + if (usb_amd_quirk_pll_check()) ohci->flags |= OHCI_QUIRK_AMD_PLL; /* SB800 needs pre-fetch fix */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 3ce71cbfbb58..f6d04491df60 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -132,7 +132,7 @@ static struct amd_chipset_info { struct amd_chipset_type sb_type; int isoc_reqs; int probe_count; - int probe_result; + bool need_pll_quirk; } amd_chipset; static DEFINE_SPINLOCK(amd_lock); @@ -201,11 +201,11 @@ void sb800_prefetch(struct device *dev, int on) } EXPORT_SYMBOL_GPL(sb800_prefetch); -int usb_amd_find_chipset_info(void) +static void usb_amd_find_chipset_info(void) { unsigned long flags; struct amd_chipset_info info; - int ret; + info.need_pll_quirk = 0; spin_lock_irqsave(&amd_lock, flags); @@ -213,27 +213,34 @@ int usb_amd_find_chipset_info(void) if (amd_chipset.probe_count > 0) { amd_chipset.probe_count++; spin_unlock_irqrestore(&amd_lock, flags); - return amd_chipset.probe_result; + return; } memset(&info, 0, sizeof(info)); spin_unlock_irqrestore(&amd_lock, flags); if (!amd_chipset_sb_type_init(&info)) { - ret = 0; goto commit; } - /* Below chipset generations needn't enable AMD PLL quirk */ - if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN || - info.sb_type.gen == AMD_CHIPSET_SB600 || - info.sb_type.gen == AMD_CHIPSET_YANGTZE || - (info.sb_type.gen == AMD_CHIPSET_SB700 && - info.sb_type.rev > 0x3b)) { + switch (info.sb_type.gen) { + case AMD_CHIPSET_SB700: + info.need_pll_quirk = info.sb_type.rev <= 0x3B; + break; + case AMD_CHIPSET_SB800: + case AMD_CHIPSET_HUDSON2: + case AMD_CHIPSET_BOLTON: + info.need_pll_quirk = 1; + break; + default: + info.need_pll_quirk = 0; + break; + } + + if (!info.need_pll_quirk) { if (info.smbus_dev) { pci_dev_put(info.smbus_dev); info.smbus_dev = NULL; } - ret = 0; goto commit; } @@ -252,7 +259,6 @@ int usb_amd_find_chipset_info(void) } } - ret = info.probe_result = 1; printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); commit: @@ -263,7 +269,6 @@ commit: /* Mark that we where here */ amd_chipset.probe_count++; - ret = amd_chipset.probe_result; spin_unlock_irqrestore(&amd_lock, flags); @@ -276,10 +281,7 @@ commit: 
amd_chipset = info; spin_unlock_irqrestore(&amd_lock, flags); } - - return ret; } -EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) { @@ -315,6 +317,13 @@ bool usb_amd_prefetch_quirk(void) } EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk); +bool usb_amd_quirk_pll_check(void) +{ + usb_amd_find_chipset_info(); + return amd_chipset.need_pll_quirk; +} +EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check); + /* * The hardware normally enables the A-link power management feature, which * lets the system lower the power consumption in idle states. @@ -520,7 +529,7 @@ void usb_amd_dev_put(void) amd_chipset.nb_type = 0; memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type)); amd_chipset.isoc_reqs = 0; - amd_chipset.probe_result = 0; + amd_chipset.need_pll_quirk = 0; spin_unlock_irqrestore(&amd_lock, flags); diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index 63c633077d9e..e729de21fad7 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -5,11 +5,11 @@ #ifdef CONFIG_USB_PCI void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); -int usb_amd_find_chipset_info(void); int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); bool usb_amd_hang_symptom_quirk(void); bool usb_amd_prefetch_quirk(void); void usb_amd_dev_put(void); +bool usb_amd_quirk_pll_check(void); void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c2fe218e051f..1e0236e90687 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -130,7 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_AMD_0x96_HOST; /* AMD PLL quirk */ - if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) + if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_quirk_pll_check()) xhci->quirks |= XHCI_AMD_PLL_FIX; if (pdev->vendor == PCI_VENDOR_ID_AMD && diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7a264962a1a9..f5c41448d067 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -2175,7 +2175,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb) if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) && usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE && urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE && - !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) + !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) && + !urb->num_sgs) return true; return false; diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 4d6ae3795a88..6ca9111d150a 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -375,7 +375,8 @@ out_err: #ifdef CONFIG_OF static void usb251xb_get_ports_field(struct usb251xb *hub, - const char *prop_name, u8 port_cnt, u8 *fld) + const char *prop_name, u8 port_cnt, + bool ds_only, u8 *fld) { struct device *dev = hub->dev; struct property *prop; @@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub, u32 port; of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) { - if ((port >= 1) && (port <= port_cnt)) + if ((port >= ds_only ? 
1 : 0) && (port <= port_cnt)) *fld |= BIT(port); else dev_warn(dev, "port %u doesn't exist\n", port); @@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES; usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt, - &hub->non_rem_dev); + true, &hub->non_rem_dev); hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF; usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt, - &hub->port_disable_sp); + true, &hub->port_disable_sp); hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS; usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt, - &hub->port_disable_bp); + true, &hub->port_disable_bp); hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; if (!of_property_read_u32(np, "sp-max-total-current-microamp", @@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, */ hub->port_swap = USB251XB_DEF_PORT_SWAP; usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt, - &hub->port_swap); - if (of_get_property(np, "swap-us-lanes", NULL)) - hub->port_swap |= BIT(0); + false, &hub->port_swap); /* The following parameters are currently not exposed to devicetree, but * may be as soon as needed. diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 30790240aec6..05b80211290d 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -28,6 +28,8 @@ * status of a command. */ +#include <linux/blkdev.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/mutex.h> @@ -99,6 +101,7 @@ static int slave_alloc (struct scsi_device *sdev) static int slave_configure(struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); + struct device *dev = us->pusb_dev->bus->sysdev; /* * Many devices have trouble transferring more than 32KB at a time, @@ -129,6 +132,14 @@ static int slave_configure(struct scsi_device *sdev) } /* + * The max_hw_sectors should be up to maximum size of a mapping for + * the device. Otherwise, a DMA API might fail on swiotlb environment. + */ + blk_queue_max_hw_sectors(sdev->request_queue, + min_t(size_t, queue_max_hw_sectors(sdev->request_queue), + dma_max_mapping_size(dev) >> SECTOR_SHIFT)); + + /* * Some USB host controllers can't do DMA; they have to use PIO. * They indicate this by setting their dma_mask to NULL. 
For * such controllers we need to make sure the block layer sets diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 819296332913..42a8c2a13ab1 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -96,7 +96,7 @@ struct vhost_uaddr { }; #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 -#define VHOST_ARCH_CAN_ACCEL_UACCESS 1 +#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 #else #define VHOST_ARCH_CAN_ACCEL_UACCESS 0 #endif diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 4c339c7e66e5..a446a7221e13 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) goto out_put_map; if (!use_ptemod) { - err = vm_map_pages(vma, map->pages, map->count); + err = vm_map_pages_zero(vma, map->pages, map->count); if (err) goto out_put_map; } else { diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 2f5ce7230a43..c6070e70dd73 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata) return 0; } -struct remap_pfn { - struct mm_struct *mm; - struct page **pages; - pgprot_t prot; - unsigned long i; -}; - -static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data) -{ - struct remap_pfn *r = data; - struct page *page = r->pages[r->i]; - pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot)); - - set_pte_at(r->mm, addr, ptep, pte); - r->i++; - - return 0; -} - static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) { struct privcmd_data *data = file->private_data; @@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) goto out; } - if (xen_feature(XENFEAT_auto_translated_physmap)) { + if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && + xen_feature(XENFEAT_auto_translated_physmap)) { unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE); struct page **pages; unsigned int i; @@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) if (rc) goto out; - if (xen_feature(XENFEAT_auto_translated_physmap)) { - struct remap_pfn r = { - .mm = vma->vm_mm, - .pages = vma->vm_private_data, - .prot = vma->vm_page_prot, - }; - - rc = apply_to_page_range(r.mm, kdata.addr, - kdata.num << PAGE_SHIFT, - remap_pfn_fn, &r); + if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && + xen_feature(XENFEAT_auto_translated_physmap)) { + rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT); } else { unsigned int domid = (xdata.flags & XENMEM_rsrc_acq_caller_owned) ? 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index cfbe46785a3b..ae1df496bf38 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address) return xen_phys_to_bus(virt_to_phys(address)); } -static int check_pages_physically_contiguous(unsigned long xen_pfn, - unsigned int offset, - size_t length) +static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) { - unsigned long next_bfn; - int i; - int nr_pages; + unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p); + unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size); next_bfn = pfn_to_bfn(xen_pfn); - nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT; - for (i = 1; i < nr_pages; i++) { + for (i = 1; i < nr_pages; i++) if (pfn_to_bfn(++xen_pfn) != ++next_bfn) - return 0; - } - return 1; -} + return 1; -static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) -{ - unsigned long xen_pfn = XEN_PFN_DOWN(p); - unsigned int offset = p & ~XEN_PAGE_MASK; - - if (offset + size <= XEN_PAGE_SIZE) - return 0; - if (check_pages_physically_contiguous(xen_pfn, offset, size)) - return 0; - return 1; + return 0; } static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) @@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); return NULL; } + SetPageXenRemapped(virt_to_page(ret)); } memset(ret, 0, size); return ret; @@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, /* Convert the size to actually allocated. */ size = 1UL << (order + XEN_PAGE_SHIFT); - if (((dev_addr + size - 1 <= dma_mask)) || - range_straddles_page_boundary(phys, size)) + if (!WARN_ON((dev_addr + size - 1 > dma_mask) || + range_straddles_page_boundary(phys, size)) && + TestClearPageXenRemapped(virt_to_page(vaddr))) xen_destroy_contiguous_region(phys, order); xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 73427d8e0116..e5694133ebe5 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value, { int err; u16 old_value; - pci_power_t new_state, old_state; + pci_power_t new_state; err = pci_read_config_word(dev, offset, &old_value); if (err) goto out; - old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK); new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK); new_value &= PM_OK_BITS; diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index ba883a80b3c0..7b1077f0abcb 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, return 0; } EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); + +struct remap_pfn { + struct mm_struct *mm; + struct page **pages; + pgprot_t prot; + unsigned long i; +}; + +static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + struct page *page = r->pages[r->i]; + pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot)); + + set_pte_at(r->mm, addr, ptep, pte); + r->i++; + + return 0; +} + +/* Used by the privcmd module, but has to be built-in on ARM */ +int xen_remap_vma_range(struct vm_area_struct *vma, 
unsigned long addr, unsigned long len) +{ + struct remap_pfn r = { + .mm = vma->vm_mm, + .pages = vma->vm_private_data, + .prot = vma->vm_page_prot, + }; + + return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r); +} +EXPORT_SYMBOL_GPL(xen_remap_vma_range); diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 1ce73e014139..114f281f3687 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -339,8 +339,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->tmp_u = htonl(0); afs_extract_to_tmp(call); } + /* Fall through */ - /* Fall through - and extract the returned data length */ + /* extract the returned data length */ case 1: _debug("extract data length"); ret = afs_extract_data(call, true); @@ -366,8 +367,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); + /* Fall through */ - /* Fall through - and extract the returned data */ + /* extract the returned data */ case 2: _debug("extract data %zu/%llu", iov_iter_count(&call->iter), req->remain); @@ -394,8 +396,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) /* Discard any excess data the server gave us */ iov_iter_discard(&call->iter, READ, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + case 3: _debug("extract discard %zu/%llu", iov_iter_count(&call->iter), req->actual_len - req->len); @@ -407,8 +409,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) no_more_data: call->unmarshall = 4; afs_extract_to_buf(call, (21 + 3 + 6) * 4); + /* Fall through */ - /* Fall through - and extract the metadata */ + /* extract the metadata */ case 4: ret = afs_extract_data(call, false); if (ret < 0) @@ -1471,8 +1474,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) case 0: call->unmarshall++; afs_extract_to_buf(call, 12 * 4); + /* Fall through */ - /* Fall through - and extract the returned status record */ + /* extract the returned status record */ case 1: _debug("extract status"); ret = afs_extract_data(call, true); @@ -1483,8 +1487,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus); call->unmarshall++; afs_extract_to_tmp(call); + /* Fall through */ - /* Fall through - and extract the volume name length */ + /* extract the volume name length */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -1498,8 +1503,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the volume name */ + /* extract the volume name */ case 3: _debug("extract volname"); ret = afs_extract_data(call, true); @@ -1511,8 +1517,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the offline message length */ + /* extract the offline message length */ case 4: ret = afs_extract_data(call, true); if (ret < 0) @@ -1526,8 +1533,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the offline message */ + /* extract the offline message */ case 5: _debug("extract 
offline"); ret = afs_extract_data(call, true); @@ -1540,8 +1548,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the message of the day length */ + /* extract the message of the day length */ case 6: ret = afs_extract_data(call, true); if (ret < 0) @@ -1555,8 +1564,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the message of the day */ + /* extract the message of the day */ case 7: _debug("extract motd"); ret = afs_extract_data(call, false); @@ -1850,8 +1860,9 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the capabilities word count */ + /* Extract the capabilities word count */ case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -1863,8 +1874,9 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) call->count2 = count; iov_iter_discard(&call->iter, READ, count * sizeof(__be32)); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract capabilities words */ + /* Extract capabilities words */ case 2: ret = afs_extract_data(call, false); if (ret < 0) @@ -2020,9 +2032,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ /* Extract the file status count and array in two steps */ - /* Fall through */ case 1: _debug("extract status count"); ret = afs_extract_data(call, true); @@ -2039,8 +2051,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, 21 * sizeof(__be32)); - /* Fall through */ + case 2: _debug("extract status array %u", call->count); ret = afs_extract_data(call, true); @@ -2060,9 +2072,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); + /* Fall through */ /* Extract the callback count and array in two steps */ - /* Fall through */ case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); @@ -2078,8 +2090,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, 3 * sizeof(__be32)); - /* Fall through */ + case 4: _debug("extract CB array"); ret = afs_extract_data(call, true); @@ -2096,8 +2108,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, 6 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + case 5: ret = afs_extract_data(call, false); if (ret < 0) @@ -2193,6 +2205,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ /* extract the returned data length */ case 1: @@ -2210,6 +2223,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) acl->size = call->count2; afs_extract_begin(call, acl->data, size); call->unmarshall++; + /* Fall through */ /* extract the returned data */ case 2: @@ -2219,6 +2233,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) afs_extract_to_buf(call, (21 + 6) * 4); call->unmarshall++; + /* Fall through */ /* extract the metadata */ case 3: diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 
18722aaeda33..2575503170fc 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -450,8 +450,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->offset = req->pos & (PAGE_SIZE - 1); afs_extract_to_tmp64(call); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the returned data length */ + /* extract the returned data length */ case 1: _debug("extract data length"); ret = afs_extract_data(call, true); @@ -477,8 +478,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); + /* Fall through */ - /* Fall through - and extract the returned data */ + /* extract the returned data */ case 2: _debug("extract data %zu/%llu", iov_iter_count(&call->iter), req->remain); @@ -505,8 +507,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) /* Discard any excess data the server gave us */ iov_iter_discard(&call->iter, READ, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + case 3: _debug("extract discard %zu/%llu", iov_iter_count(&call->iter), req->actual_len - req->len); @@ -521,8 +523,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); + /* Fall through */ - /* Fall through - and extract the metadata */ + /* extract the metadata */ case 4: ret = afs_extract_data(call, false); if (ret < 0) @@ -539,8 +542,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->file_size = call->out_scb->status.size; call->unmarshall++; - /* Fall through */ + case 5: break; } @@ -1429,8 +1432,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) case 0: call->unmarshall++; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus)); + /* Fall through */ - /* Fall through - and extract the returned status record */ + /* extract the returned status record */ case 1: _debug("extract status"); ret = afs_extract_data(call, true); @@ -1441,8 +1445,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus); call->unmarshall++; afs_extract_to_tmp(call); + /* Fall through */ - /* Fall through - and extract the volume name length */ + /* extract the volume name length */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -1456,8 +1461,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the volume name */ + /* extract the volume name */ case 3: _debug("extract volname"); ret = afs_extract_data(call, true); @@ -1469,8 +1475,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the offline message length */ + /* extract the offline message length */ case 4: ret = afs_extract_data(call, true); if (ret < 0) @@ -1484,8 +1491,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the offline message */ + /* extract the offline message */ case 5: _debug("extract offline"); ret = 
afs_extract_data(call, true); @@ -1498,8 +1506,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the message of the day length */ + /* extract the message of the day length */ case 6: ret = afs_extract_data(call, true); if (ret < 0) @@ -1513,8 +1522,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; + /* Fall through */ - /* Fall through - and extract the message of the day */ + /* extract the message of the day */ case 7: _debug("extract motd"); ret = afs_extract_data(call, false); @@ -1526,8 +1536,8 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("motd '%s'", p); call->unmarshall++; - /* Fall through */ + case 8: break; } @@ -1805,9 +1815,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ /* Extract the file status count and array in two steps */ - /* Fall through */ case 1: _debug("extract status count"); ret = afs_extract_data(call, true); @@ -1824,8 +1834,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus)); - /* Fall through */ + case 2: _debug("extract status array %u", call->count); ret = afs_extract_data(call, true); @@ -1845,9 +1855,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); + /* Fall through */ /* Extract the callback count and array in two steps */ - /* Fall through */ case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); @@ -1863,8 +1873,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack)); - /* Fall through */ + case 4: _debug("extract CB array"); ret = afs_extract_data(call, true); @@ -1881,8 +1891,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; - /* Fall through */ + case 5: ret = afs_extract_data(call, false); if (ret < 0) @@ -1892,8 +1902,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) xdr_decode_YFSVolSync(&bp, call->out_volsync); call->unmarshall++; - /* Fall through */ + case 6: break; } @@ -1978,6 +1988,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ /* Extract the file ACL length */ case 1: @@ -1999,6 +2010,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) iov_iter_discard(&call->iter, READ, size); } call->unmarshall++; + /* Fall through */ /* Extract the file ACL */ case 2: @@ -2008,6 +2020,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; + /* Fall through */ /* Extract the volume ACL length */ case 3: @@ -2029,6 +2042,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) iov_iter_discard(&call->iter, READ, size); } call->unmarshall++; + /* Fall through */ /* Extract the volume ACL */ case 4: @@ -2041,6 +2055,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct 
yfs_xdr_YFSVolSync)); call->unmarshall++; + /* Fall through */ /* extract the metadata */ case 5: @@ -2057,6 +2072,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) xdr_decode_YFSVolSync(&bp, call->out_volsync); call->unmarshall++; + /* Fall through */ case 6: break; diff --git a/fs/block_dev.c b/fs/block_dev.c index 4707dfff991b..a6f7c892cb4a 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -345,15 +345,24 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) struct bio *bio; bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; bool is_read = (iov_iter_rw(iter) == READ), is_sync; + bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0; loff_t pos = iocb->ki_pos; blk_qc_t qc = BLK_QC_T_NONE; - int ret = 0; + gfp_t gfp; + ssize_t ret; if ((pos | iov_iter_alignment(iter)) & (bdev_logical_block_size(bdev) - 1)) return -EINVAL; - bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool); + if (nowait) + gfp = GFP_NOWAIT; + else + gfp = GFP_KERNEL; + + bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool); + if (!bio) + return -EAGAIN; dio = container_of(bio, struct blkdev_dio, bio); dio->is_sync = is_sync = is_sync_kiocb(iocb); @@ -375,7 +384,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) if (!is_poll) blk_start_plug(&plug); + ret = 0; for (;;) { + int err; + bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = pos >> 9; bio->bi_write_hint = iocb->ki_hint; @@ -383,8 +395,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) bio->bi_end_io = blkdev_bio_end_io; bio->bi_ioprio = iocb->ki_ioprio; - ret = bio_iov_iter_get_pages(bio, iter); - if (unlikely(ret)) { + err = bio_iov_iter_get_pages(bio, iter); + if (unlikely(err)) { + if (!ret) + ret = err; bio->bi_status = BLK_STS_IOERR; bio_endio(bio); break; @@ -399,6 +413,14 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) task_io_account_write(bio->bi_iter.bi_size); } + /* + * Tell underlying layer to not block for resource shortage. + * And if we would have blocked, return error inline instead + * of through the bio->bi_end_io() callback. + */ + if (nowait) + bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE); + dio->size += bio->bi_iter.bi_size; pos += bio->bi_iter.bi_size; @@ -412,6 +434,12 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) } qc = submit_bio(bio); + if (qc == BLK_QC_T_EAGAIN) { + if (!ret) + ret = -EAGAIN; + goto error; + } + ret = dio->size; if (polled) WRITE_ONCE(iocb->ki_cookie, qc); @@ -432,8 +460,20 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) atomic_inc(&dio->ref); } - submit_bio(bio); - bio = bio_alloc(GFP_KERNEL, nr_pages); + qc = submit_bio(bio); + if (qc == BLK_QC_T_EAGAIN) { + if (!ret) + ret = -EAGAIN; + goto error; + } + ret = dio->size; + + bio = bio_alloc(gfp, nr_pages); + if (!bio) { + if (!ret) + ret = -EAGAIN; + goto error; + } } if (!is_poll) @@ -453,13 +493,16 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) } __set_current_state(TASK_RUNNING); +out: if (!ret) ret = blk_status_to_errno(dio->bio.bi_status); - if (likely(!ret)) - ret = dio->size; bio_put(&dio->bio); return ret; +error: + if (!is_poll) + blk_finish_plug(&plug); + goto out; } static ssize_t @@ -1139,8 +1182,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) * Pointer to the block device containing @bdev on success, ERR_PTR() * value on failure. 
*/ -static struct block_device *bd_start_claiming(struct block_device *bdev, - void *holder) +struct block_device *bd_start_claiming(struct block_device *bdev, void *holder) { struct gendisk *disk; struct block_device *whole; @@ -1187,6 +1229,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, return ERR_PTR(err); } } +EXPORT_SYMBOL(bd_start_claiming); + +static void bd_clear_claiming(struct block_device *whole, void *holder) +{ + lockdep_assert_held(&bdev_lock); + /* tell others that we're done */ + BUG_ON(whole->bd_claiming != holder); + whole->bd_claiming = NULL; + wake_up_bit(&whole->bd_claiming, 0); +} + +/** + * bd_finish_claiming - finish claiming of a block device + * @bdev: block device of interest + * @whole: whole block device (returned from bd_start_claiming()) + * @holder: holder that has claimed @bdev + * + * Finish exclusive open of a block device. Mark the device as exclusively + * open by the holder and wake up all waiters for exclusive open to finish. + */ +void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, + void *holder) +{ + spin_lock(&bdev_lock); + BUG_ON(!bd_may_claim(bdev, whole, holder)); + /* + * Note that for a whole device bd_holders will be incremented twice, + * and bd_holder will be set to bd_may_claim before being set to holder + */ + whole->bd_holders++; + whole->bd_holder = bd_may_claim; + bdev->bd_holders++; + bdev->bd_holder = holder; + bd_clear_claiming(whole, holder); + spin_unlock(&bdev_lock); +} +EXPORT_SYMBOL(bd_finish_claiming); + +/** + * bd_abort_claiming - abort claiming of a block device + * @bdev: block device of interest + * @whole: whole block device (returned from bd_start_claiming()) + * @holder: holder that has claimed @bdev + * + * Abort claiming of a block device when the exclusive open failed. This can be + * also used when exclusive open is not actually desired and we just needed + * to block other exclusive openers for a while. + */ +void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, + void *holder) +{ + spin_lock(&bdev_lock); + bd_clear_claiming(whole, holder); + spin_unlock(&bdev_lock); +} +EXPORT_SYMBOL(bd_abort_claiming); #ifdef CONFIG_SYSFS struct bd_holder_disk { @@ -1656,29 +1754,7 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) /* finish claiming */ mutex_lock(&bdev->bd_mutex); - spin_lock(&bdev_lock); - - if (!res) { - BUG_ON(!bd_may_claim(bdev, whole, holder)); - /* - * Note that for a whole device bd_holders - * will be incremented twice, and bd_holder - * will be set to bd_may_claim before being - * set to holder - */ - whole->bd_holders++; - whole->bd_holder = bd_may_claim; - bdev->bd_holders++; - bdev->bd_holder = holder; - } - - /* tell others that we're done */ - BUG_ON(whole->bd_claiming != holder); - whole->bd_claiming = NULL; - wake_up_bit(&whole->bd_claiming, 0); - - spin_unlock(&bdev_lock); - + bd_finish_claiming(bdev, whole, holder); /* * Block event polling for write claims if requested. 
Any * write holder makes the write_holder state stick until diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 212b4a854f2c..38651fae7f21 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -4,6 +4,7 @@ config BTRFS_FS tristate "Btrfs filesystem support" select CRYPTO select CRYPTO_CRC32C + select LIBCRC32C select ZLIB_INFLATE select ZLIB_DEFLATE select LZO_COMPRESS diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 89116afda7a2..e5d85311d5d5 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, ulist_init(roots); ulist_init(tmp); - trans = btrfs_attach_transaction(root); + trans = btrfs_join_transaction_nostart(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { ret = PTR_ERR(trans); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 41a2bd2e0c56..5f7ee70b3d1a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4106,6 +4106,7 @@ void close_ctree(struct btrfs_fs_info *fs_info) percpu_counter_destroy(&fs_info->dev_replace.bio_counter); cleanup_srcu_struct(&fs_info->subvol_srcu); + btrfs_free_csum_hash(fs_info); btrfs_free_stripe_hash_table(fs_info); btrfs_free_ref_cache(fs_info); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1af069a9a0c7..ee582a36653d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -395,10 +395,31 @@ static noinline int add_async_extent(struct async_chunk *cow, return 0; } +/* + * Check if the inode has flags compatible with compression + */ +static inline bool inode_can_compress(struct inode *inode) +{ + if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW || + BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) + return false; + return true; +} + +/* + * Check if the inode needs to be submitted to compression, based on mount + * options, defragmentation, properties or heuristics. 
+ */ static inline int inode_need_compress(struct inode *inode, u64 start, u64 end) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + if (!inode_can_compress(inode)) { + WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), + KERN_ERR "BTRFS: unexpected compression for ino %llu\n", + btrfs_ino(BTRFS_I(inode))); + return 0; + } /* force compress */ if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) return 1; @@ -1631,7 +1652,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { ret = run_delalloc_nocow(inode, locked_page, start, end, page_started, 0, nr_written); - } else if (!inode_need_compress(inode, start, end)) { + } else if (!inode_can_compress(inode) || + !inode_need_compress(inode, start, end)) { ret = cow_file_range(inode, locked_page, start, end, end, page_started, nr_written, 1, NULL); } else { diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 98fccce4208c..393eceda57c8 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -346,9 +346,12 @@ void btrfs_tree_unlock(struct extent_buffer *eb) if (blockers) { btrfs_assert_no_spinning_writers(eb); eb->blocking_writers--; - /* Use the lighter barrier after atomic */ - smp_mb__after_atomic(); - cond_wake_up_nomb(&eb->write_lock_wq); + /* + * We need to order modifying blocking_writers above with + * actually waking up the sleepers to ensure they see the + * updated value of blocking_writers + */ + cond_wake_up(&eb->write_lock_wq); } else { btrfs_assert_spinning_writers_put(eb); write_unlock(&eb->lock); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 1744ba8b2754..ae7f64a8facb 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -985,13 +985,14 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, struct extent_state **cached_state) { struct btrfs_ordered_extent *ordered; - struct extent_state *cachedp = NULL; + struct extent_state *cache = NULL; + struct extent_state **cachedp = &cache; if (cached_state) - cachedp = *cached_state; + cachedp = cached_state; while (1) { - lock_extent_bits(tree, start, end, &cachedp); + lock_extent_bits(tree, start, end, cachedp); ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1); if (!ordered) { @@ -1001,10 +1002,10 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, * aren't exposing it outside of this function */ if (!cached_state) - refcount_dec(&cachedp->refs); + refcount_dec(&cache->refs); break; } - unlock_extent_cached(tree, start, end, &cachedp); + unlock_extent_cached(tree, start, end, cachedp); btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1); btrfs_put_ordered_extent(ordered); } diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 69b59bf75882..c3c0c064c25d 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx, { int ret = 0; - if (sctx->cur_ino != sctx->cmp_key->objectid) { - - if (result == BTRFS_COMPARE_TREE_CHANGED) { - struct extent_buffer *leaf_l; - struct extent_buffer *leaf_r; - struct btrfs_file_extent_item *ei_l; - struct btrfs_file_extent_item *ei_r; - - leaf_l = sctx->left_path->nodes[0]; - leaf_r = sctx->right_path->nodes[0]; - ei_l = btrfs_item_ptr(leaf_l, - sctx->left_path->slots[0], - struct btrfs_file_extent_item); - ei_r = btrfs_item_ptr(leaf_r, - sctx->right_path->slots[0], - struct btrfs_file_extent_item); - - /* - * We may have found an extent item that has changed - * only its disk_bytenr field and the 
corresponding - * inode item was not updated. This case happens due to - * very specific timings during relocation when a leaf - * that contains file extent items is COWed while - * relocation is ongoing and its in the stage where it - * updates data pointers. So when this happens we can - * safely ignore it since we know it's the same extent, - * but just at different logical and physical locations - * (when an extent is fully replaced with a new one, we - * know the generation number must have changed too, - * since snapshot creation implies committing the current - * transaction, and the inode item must have been updated - * as well). - * This replacement of the disk_bytenr happens at - * relocation.c:replace_file_extents() through - * relocation.c:btrfs_reloc_cow_block(). - */ - if (btrfs_file_extent_generation(leaf_l, ei_l) == - btrfs_file_extent_generation(leaf_r, ei_r) && - btrfs_file_extent_ram_bytes(leaf_l, ei_l) == - btrfs_file_extent_ram_bytes(leaf_r, ei_r) && - btrfs_file_extent_compression(leaf_l, ei_l) == - btrfs_file_extent_compression(leaf_r, ei_r) && - btrfs_file_extent_encryption(leaf_l, ei_l) == - btrfs_file_extent_encryption(leaf_r, ei_r) && - btrfs_file_extent_other_encoding(leaf_l, ei_l) == - btrfs_file_extent_other_encoding(leaf_r, ei_r) && - btrfs_file_extent_type(leaf_l, ei_l) == - btrfs_file_extent_type(leaf_r, ei_r) && - btrfs_file_extent_disk_bytenr(leaf_l, ei_l) != - btrfs_file_extent_disk_bytenr(leaf_r, ei_r) && - btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) == - btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) && - btrfs_file_extent_offset(leaf_l, ei_l) == - btrfs_file_extent_offset(leaf_r, ei_r) && - btrfs_file_extent_num_bytes(leaf_l, ei_l) == - btrfs_file_extent_num_bytes(leaf_r, ei_r)) - return 0; - } - - inconsistent_snapshot_error(sctx, result, "extent"); - return -EIO; - } + /* + * We have found an extent item that changed without the inode item + * having changed. This can happen either after relocation (where the + * disk_bytenr of an extent item is replaced at + * relocation.c:replace_file_extents()) or after deduplication into a + * file in both the parent and send snapshots (where an extent item can + * get modified or replaced with a new one). Note that deduplication + * updates the inode item, but it only changes the iversion (sequence + * field in the inode item) of the inode, so if a file is deduplicated + * the same amount of times in both the parent and send snapshots, its + * iversion becomes the same in both snapshots, whence the inode item is + * the same on both snapshots. 
+ */ + if (sctx->cur_ino != sctx->cmp_key->objectid) + return 0; if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (result != BTRFS_COMPARE_TREE_DELETED) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3b8ae1a8f02d..e3adb714c04b 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | __TRANS_ATTACH | - __TRANS_JOIN), + __TRANS_JOIN | + __TRANS_JOIN_NOSTART), [TRANS_STATE_UNBLOCKED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | - __TRANS_JOIN_NOLOCK), + __TRANS_JOIN_NOLOCK | + __TRANS_JOIN_NOSTART), [TRANS_STATE_COMPLETED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | - __TRANS_JOIN_NOLOCK), + __TRANS_JOIN_NOLOCK | + __TRANS_JOIN_NOSTART), }; void btrfs_put_transaction(struct btrfs_transaction *transaction) @@ -543,7 +546,8 @@ again: ret = join_transaction(fs_info, type); if (ret == -EBUSY) { wait_current_trans(fs_info); - if (unlikely(type == TRANS_ATTACH)) + if (unlikely(type == TRANS_ATTACH || + type == TRANS_JOIN_NOSTART)) ret = -ENOENT; } } while (ret == -EBUSY); @@ -660,6 +664,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root } /* + * Similar to regular join but it never starts a transaction when none is + * running or after waiting for the current one to finish. + */ +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root) +{ + return start_transaction(root, 0, TRANS_JOIN_NOSTART, + BTRFS_RESERVE_NO_FLUSH, true); +} + +/* * btrfs_attach_transaction() - catch the running transaction * * It is used when we want to commit the current the transaction, but @@ -2037,6 +2051,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) } } else { spin_unlock(&fs_info->trans_lock); + /* + * The previous transaction was aborted and was already removed + * from the list of transactions at fs_info->trans_list. So we + * abort to prevent writing a new superblock that reflects a + * corrupt state (pointing to trees with unwritten nodes/leafs). 
+ */ + if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) { + ret = -EROFS; + goto cleanup_transaction; + } } extwriter_counter_dec(cur_trans, trans->type); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 527ea94b57d9..2c5a6f6e5bb0 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -94,11 +94,13 @@ struct btrfs_transaction { #define __TRANS_JOIN (1U << 11) #define __TRANS_JOIN_NOLOCK (1U << 12) #define __TRANS_DUMMY (1U << 13) +#define __TRANS_JOIN_NOSTART (1U << 14) #define TRANS_START (__TRANS_START | __TRANS_FREEZABLE) #define TRANS_ATTACH (__TRANS_ATTACH) #define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE) #define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK) +#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART) #define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH) @@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( int min_factor); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction_barrier( struct btrfs_root *root); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a13ddba1ebc3..d74b74ca07af 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5941,6 +5941,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 stripe_len; u64 raid56_full_stripe_start = (u64)-1; int data_stripes; + int ret = 0; ASSERT(op != BTRFS_MAP_DISCARD); @@ -5961,8 +5962,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, btrfs_crit(fs_info, "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", stripe_offset, offset, em->start, logical, stripe_len); - free_extent_map(em); - return -EINVAL; + ret = -EINVAL; + goto out; } /* stripe_offset is the offset of this block in its stripe */ @@ -6009,7 +6010,10 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, io_geom->stripe_offset = stripe_offset; io_geom->raid56_stripe_offset = raid56_full_stripe_start; - return 0; +out: + /* once for us */ + free_extent_map(em); + return ret; } static int __btrfs_map_block(struct btrfs_fs_info *fs_info, diff --git a/fs/coredump.c b/fs/coredump.c index e42e17e55bfd..b1ea7dfbd149 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -7,6 +7,7 @@ #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/swap.h> +#include <linux/ctype.h> #include <linux/string.h> #include <linux/init.h> #include <linux/pagemap.h> @@ -187,11 +188,13 @@ put_exe_file: * name into corename, which must have space for at least * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. 
*/ -static int format_corename(struct core_name *cn, struct coredump_params *cprm) +static int format_corename(struct core_name *cn, struct coredump_params *cprm, + size_t **argv, int *argc) { const struct cred *cred = current_cred(); const char *pat_ptr = core_pattern; int ispipe = (*pat_ptr == '|'); + bool was_space = false; int pid_in_pattern = 0; int err = 0; @@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) return -ENOMEM; cn->corename[0] = '\0'; - if (ispipe) + if (ispipe) { + int argvs = sizeof(core_pattern) / 2; + (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL); + if (!(*argv)) + return -ENOMEM; + (*argv)[(*argc)++] = 0; ++pat_ptr; + } /* Repeat as long as we have more pattern to process and more output space */ while (*pat_ptr) { + /* + * Split on spaces before doing template expansion so that + * %e and %E don't get split if they have spaces in them + */ + if (ispipe) { + if (isspace(*pat_ptr)) { + was_space = true; + pat_ptr++; + continue; + } else if (was_space) { + was_space = false; + err = cn_printf(cn, "%c", '\0'); + if (err) + return err; + (*argv)[(*argc)++] = cn->used; + } + } if (*pat_ptr != '%') { err = cn_printf(cn, "%c", *pat_ptr++); } else { @@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo) struct cred *cred; int retval = 0; int ispipe; + size_t *argv = NULL; + int argc = 0; struct files_struct *displaced; /* require nonrelative corefile path and be extra careful */ bool need_suid_safe = false; @@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo) old_cred = override_creds(cred); - ispipe = format_corename(&cn, &cprm); + ispipe = format_corename(&cn, &cprm, &argv, &argc); if (ispipe) { + int argi; int dump_count; char **helper_argv; struct subprocess_info *sub_info; @@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo) goto fail_dropcount; } - helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL); + helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), + GFP_KERNEL); if (!helper_argv) { printk(KERN_WARNING "%s failed to allocate memory\n", __func__); goto fail_dropcount; } + for (argi = 0; argi < argc; argi++) + helper_argv[argi] = cn.corename + argv[argi]; + helper_argv[argi] = NULL; retval = -ENOMEM; sub_info = call_usermodehelper_setup(helper_argv[0], @@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) retval = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); - argv_free(helper_argv); + kfree(helper_argv); if (retval) { printk(KERN_INFO "Core dump to |%s pipe failed\n", cn.corename); @@ -766,6 +799,7 @@ fail_dropcount: if (ispipe) atomic_dec(&core_dump_count); fail_unlock: + kfree(argv); kfree(cn.corename); coredump_finish(mm, core_dumped); revert_creds(old_cred); @@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry) static void put_unlocked_entry(struct xa_state *xas, void *entry) { /* If we were the only waiter woken, wake the next one */ - if (entry && dax_is_conflict(entry)) + if (entry && !dax_is_conflict(entry)) dax_wake_entry(xas, entry, false); } diff --git a/fs/exec.c b/fs/exec.c index c71cbfe6826a..f7f6a140856a 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1828,7 +1828,7 @@ static int __do_execve_file(int fd, struct filename *filename, membarrier_execve(current); rseq_execve(current); acct_update_integrals(current); - task_numa_free(current); + task_numa_free(current, false); free_bprm(bprm); kfree(pathbuf); if (filename) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 
f8d46df8fa9e..3e58a6f697dd 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id) static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) { struct f2fs_inode_info *fi = F2FS_I(inode); - u32 oldflags; /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return -EPERM; - oldflags = fi->i_flags; - - if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) - if (!capable(CAP_LINUX_IMMUTABLE)) - return -EPERM; - - fi->i_flags = iflags | (oldflags & ~mask); + fi->i_flags = iflags | (fi->i_flags & ~mask); if (fi->i_flags & F2FS_PROJINHERIT_FL) set_inode_flag(inode, FI_PROJ_INHERIT); @@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - u32 fsflags; + struct f2fs_inode_info *fi = F2FS_I(inode); + u32 fsflags, old_fsflags; u32 iflags; int ret; @@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) inode_lock(inode); + old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags); + ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); + if (ret) + goto out; + ret = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); +out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; @@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags) return iflags; } -static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) +static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa) { - struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); - struct fsxattr fa; - memset(&fa, 0, sizeof(struct fsxattr)); - fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags); + simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags)); if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) - fa.fsx_projid = (__u32)from_kprojid(&init_user_ns, - fi->i_projid); - - if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) - return -EFAULT; - return 0; + fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); } -static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa) +static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) { - /* - * Project Quota ID state is only allowed to change from within the init - * namespace. Enforce that restriction only if we are trying to change - * the quota ID state. Everything else is allowed in user namespaces. 
- */ - if (current_user_ns() == &init_user_ns) - return 0; + struct inode *inode = file_inode(filp); + struct fsxattr fa; - if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid) - return -EINVAL; - - if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) { - if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT)) - return -EINVAL; - } else { - if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT) - return -EINVAL; - } + f2fs_fill_fsxattr(inode, &fa); + if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) + return -EFAULT; return 0; } static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - struct fsxattr fa; + struct fsxattr fa, old_fa; u32 iflags; int err; @@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) return err; inode_lock(inode); - err = f2fs_ioctl_check_project(inode, &fa); + + f2fs_fill_fsxattr(inode, &old_fa); + err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa); if (err) goto out; + err = f2fs_setflags_common(inode, iflags, f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); if (err) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 6691f526fa40..8974672db78f 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx, if (lfs_mode) down_write(&fio.sbi->io_order_lock); + mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), + fio.old_blkaddr, false); + if (!mpage) + goto up_out; + + fio.encrypted_page = mpage; + + /* read source block in mpage */ + if (!PageUptodate(mpage)) { + err = f2fs_submit_page_bio(&fio); + if (err) { + f2fs_put_page(mpage, 1); + goto up_out; + } + lock_page(mpage); + if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || + !PageUptodate(mpage))) { + err = -EIO; + f2fs_put_page(mpage, 1); + goto up_out; + } + } + f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, &sum, CURSEG_COLD_DATA, NULL, false); @@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx, newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; - goto recover_block; - } - - mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), - fio.old_blkaddr, FGP_LOCK, GFP_NOFS); - if (mpage) { - bool updated = false; - - if (PageUptodate(mpage)) { - memcpy(page_address(fio.encrypted_page), - page_address(mpage), PAGE_SIZE); - updated = true; - } f2fs_put_page(mpage, 1); - invalidate_mapping_pages(META_MAPPING(fio.sbi), - fio.old_blkaddr, fio.old_blkaddr); - if (updated) - goto write_page; - } - - err = f2fs_submit_page_bio(&fio); - if (err) - goto put_page_out; - - /* write page */ - lock_page(fio.encrypted_page); - - if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) { - err = -EIO; - goto put_page_out; - } - if (unlikely(!PageUptodate(fio.encrypted_page))) { - err = -EIO; - goto put_page_out; + goto recover_block; } -write_page: + /* write target block */ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); + memcpy(page_address(fio.encrypted_page), + page_address(mpage), PAGE_SIZE); + f2fs_put_page(mpage, 1); + invalidate_mapping_pages(META_MAPPING(fio.sbi), + fio.old_blkaddr, fio.old_blkaddr); + set_page_dirty(fio.encrypted_page); if (clear_page_dirty_for_io(fio.encrypted_page)) dec_page_count(fio.sbi, F2FS_DIRTY_META); @@ -871,11 +868,12 @@ write_page: put_page_out: f2fs_put_page(fio.encrypted_page, 1); recover_block: - if (lfs_mode) - up_write(&fio.sbi->io_order_lock); if (err) f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, true, 
true); +up_out: + if (lfs_mode) + up_write(&fio.sbi->io_order_lock); put_out: f2fs_put_dnode(&dn); out: diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 6de6cda44031..78a1b873e48a 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, size_t crc_offset = 0; __u32 crc = 0; + if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) { + f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); + return -EINVAL; + } + /* Check checksum_offset and crc in superblock */ if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) { crc_offset = le32_to_cpu(raw_super->checksum_offset); @@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, offsetof(struct f2fs_super_block, crc)) { f2fs_info(sbi, "Invalid SB checksum offset: %zu", crc_offset); - return 1; + return -EFSCORRUPTED; } crc = le32_to_cpu(raw_super->crc); if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { f2fs_info(sbi, "Invalid SB checksum value: %u", crc); - return 1; + return -EFSCORRUPTED; } } - if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { - f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", - F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); - return 1; - } - /* Currently, support only 4KB page cache size */ if (F2FS_BLKSIZE != PAGE_SIZE) { f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB", PAGE_SIZE); - return 1; + return -EFSCORRUPTED; } /* Currently, support only 4KB block size */ @@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, if (blocksize != F2FS_BLKSIZE) { f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", blocksize); - return 1; + return -EFSCORRUPTED; } /* check log blocks per segment */ if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { f2fs_info(sbi, "Invalid log blocks per segment (%u)", le32_to_cpu(raw_super->log_blocks_per_seg)); - return 1; + return -EFSCORRUPTED; } /* Currently, support 512/1024/2048/4096 bytes sector size */ @@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, F2FS_MIN_LOG_SECTOR_SIZE) { f2fs_info(sbi, "Invalid log sectorsize (%u)", le32_to_cpu(raw_super->log_sectorsize)); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->log_sectors_per_block) + le32_to_cpu(raw_super->log_sectorsize) != @@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", le32_to_cpu(raw_super->log_sectors_per_block), le32_to_cpu(raw_super->log_sectorsize)); - return 1; + return -EFSCORRUPTED; } segment_count = le32_to_cpu(raw_super->segment_count); @@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, if (segment_count > F2FS_MAX_SEGMENT || segment_count < F2FS_MIN_SEGMENTS) { f2fs_info(sbi, "Invalid segment count (%u)", segment_count); - return 1; + return -EFSCORRUPTED; } if (total_sections > segment_count || @@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, segs_per_sec > segment_count || !segs_per_sec) { f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", segment_count, total_sections, segs_per_sec); - return 1; + return -EFSCORRUPTED; } if ((segment_count / segs_per_sec) < total_sections) { f2fs_info(sbi, "Small segment_count (%u < %u * %u)", segment_count, segs_per_sec, total_sections); - return 1; + return -EFSCORRUPTED; } if (segment_count > 
(le64_to_cpu(raw_super->block_count) >> 9)) { f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", segment_count, le64_to_cpu(raw_super->block_count)); - return 1; + return -EFSCORRUPTED; } if (secs_per_zone > total_sections || !secs_per_zone) { f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", secs_per_zone, total_sections); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || raw_super->hot_ext_count > F2FS_MAX_EXTENSION || @@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, le32_to_cpu(raw_super->extension_count), raw_super->hot_ext_count, F2FS_MAX_EXTENSION); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->cp_payload) > @@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, f2fs_info(sbi, "Insane cp_payload (%u > %u)", le32_to_cpu(raw_super->cp_payload), blocks_per_seg - F2FS_CP_PACKS); - return 1; + return -EFSCORRUPTED; } /* check reserved ino info */ @@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, le32_to_cpu(raw_super->node_ino), le32_to_cpu(raw_super->meta_ino), le32_to_cpu(raw_super->root_ino)); - return 1; + return -EFSCORRUPTED; } /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ if (sanity_check_area_boundary(sbi, bh)) - return 1; + return -EFSCORRUPTED; return 0; } @@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi, } /* sanity checking of raw super */ - if (sanity_check_raw_super(sbi, bh)) { + err = sanity_check_raw_super(sbi, bh); + if (err) { f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", block + 1); - err = -EFSCORRUPTED; brelse(bh); continue; } diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 79581b9bdebb..4df26ef2b2b1 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1002,11 +1002,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos, unsigned copied, struct page *page, struct iomap *iomap) { + struct gfs2_trans *tr = current->journal_info; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); if (page && !gfs2_is_stuffed(ip)) gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); + + if (tr->tr_num_buf_new) + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + gfs2_trans_end(sdp); } @@ -1099,8 +1104,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, tr = current->journal_info; if (tr->tr_num_buf_new) __mark_inode_dirty(inode, I_DIRTY_DATASYNC); - else - gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]); gfs2_trans_end(sdp); } @@ -1181,10 +1184,16 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, if (ip->i_qadata && ip->i_qadata->qa_qd_num) gfs2_quota_unlock(ip); + + if (unlikely(!written)) + goto out_unlock; + if (iomap->flags & IOMAP_F_SIZE_CHANGED) mark_inode_dirty(inode); - gfs2_write_unlock(inode); + set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); +out_unlock: + gfs2_write_unlock(inode); out: return 0; } diff --git a/fs/io_uring.c b/fs/io_uring.c index e2a66e12fbc6..d542f1cf4428 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -202,7 +202,7 @@ struct async_list { struct file *file; off_t io_end; - size_t io_pages; + size_t io_len; }; struct io_ring_ctx { @@ -333,7 +333,8 @@ struct io_kiocb { #define REQ_F_IO_DRAIN 16 /* drain existing IO first */ #define REQ_F_IO_DRAINED 32 /* drain done */ #define REQ_F_LINK 64 /* linked sqes */ -#define REQ_F_FAIL_LINK 128 /* fail rest of links */ +#define REQ_F_LINK_DONE 128 /* linked sqes done */ +#define 
REQ_F_FAIL_LINK 256 /* fail rest of links */ u64 user_data; u32 result; u32 sequence; @@ -429,7 +430,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx, if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) return false; - return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped; + return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped; } static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) @@ -632,6 +633,7 @@ static void io_req_link_next(struct io_kiocb *req) nxt->flags |= REQ_F_LINK; } + nxt->flags |= REQ_F_LINK_DONE; INIT_WORK(&nxt->work, io_sq_wq_submit_work); queue_work(req->ctx->sqo_wq, &nxt->work); } @@ -1064,8 +1066,44 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw, */ offset = buf_addr - imu->ubuf; iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len); - if (offset) - iov_iter_advance(iter, offset); + + if (offset) { + /* + * Don't use iov_iter_advance() here, as it's really slow for + * using the latter parts of a big fixed buffer - it iterates + * over each segment manually. We can cheat a bit here, because + * we know that: + * + * 1) it's a BVEC iter, we set it up + * 2) all bvecs are PAGE_SIZE in size, except potentially the + * first and last bvec + * + * So just find our index, and adjust the iterator afterwards. + * If the offset is within the first bvec (or the whole first + * bvec, just use iov_iter_advance(). This makes it easier + * since we can just skip the first segment, which may not + * be PAGE_SIZE aligned. + */ + const struct bio_vec *bvec = imu->bvec; + + if (offset <= bvec->bv_len) { + iov_iter_advance(iter, offset); + } else { + unsigned long seg_skip; + + /* skip first vec */ + offset -= bvec->bv_len; + seg_skip = 1 + (offset >> PAGE_SHIFT); + + iter->bvec = bvec + seg_skip; + iter->nr_segs -= seg_skip; + iter->count -= (seg_skip << PAGE_SHIFT); + iter->iov_offset = offset & ~PAGE_MASK; + if (iter->iov_offset) + iter->count -= iter->iov_offset; + } + } + return 0; } @@ -1120,28 +1158,26 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len) off_t io_end = kiocb->ki_pos + len; if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) { - unsigned long max_pages; + unsigned long max_bytes; /* Use 8x RA size as a decent limiter for both reads/writes */ - max_pages = filp->f_ra.ra_pages; - if (!max_pages) - max_pages = VM_READAHEAD_PAGES; - max_pages *= 8; - - /* If max pages are exceeded, reset the state */ - len >>= PAGE_SHIFT; - if (async_list->io_pages + len <= max_pages) { + max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3); + if (!max_bytes) + max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3); + + /* If max len are exceeded, reset the state */ + if (async_list->io_len + len <= max_bytes) { req->flags |= REQ_F_SEQ_PREV; - async_list->io_pages += len; + async_list->io_len += len; } else { io_end = 0; - async_list->io_pages = 0; + async_list->io_len = 0; } } /* New file? Reset state. 
*/ if (async_list->file != filp) { - async_list->io_pages = 0; + async_list->io_len = 0; async_list->file = filp; } async_list->io_end = io_end; @@ -1630,6 +1666,8 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) INIT_LIST_HEAD(&poll->wait.entry); init_waitqueue_func_entry(&poll->wait, io_poll_wake); + INIT_LIST_HEAD(&req->list); + mask = vfs_poll(poll->file, &ipt.pt) & poll->events; spin_lock_irq(&ctx->completion_lock); @@ -1800,6 +1838,7 @@ restart: do { struct sqe_submit *s = &req->submit; const struct io_uring_sqe *sqe = s->sqe; + unsigned int flags = req->flags; /* Ensure we clear previously set non-block flag */ req->rw.ki_flags &= ~IOCB_NOWAIT; @@ -1844,6 +1883,10 @@ restart: /* async context always use a copy of the sqe */ kfree(sqe); + /* req from defer and link list needn't decrease async cnt */ + if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE)) + goto out; + if (!async_list) break; if (!list_empty(&req_list)) { @@ -1891,6 +1934,7 @@ restart: } } +out: if (cur_mm) { set_fs(old_fs); unuse_mm(cur_mm); @@ -1917,6 +1961,10 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req) ret = true; spin_lock(&list->lock); list_add_tail(&req->list, &list->list); + /* + * Ensure we see a simultaneous modification from io_sq_wq_submit_work() + */ + smp_mb(); if (!atomic_read(&list->cnt)) { list_del_init(&req->list); ret = false; diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile index 2d165388d952..93cd11938bf5 100644 --- a/fs/iomap/Makefile +++ b/fs/iomap/Makefile @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-or-newer +# SPDX-License-Identifier: GPL-2.0-or-later # # Copyright (c) 2019 Oracle. # All Rights Reserved. diff --git a/fs/namespace.c b/fs/namespace.c index 6464ea4acba9..d28d30b13043 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1463,7 +1463,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; disconnect = disconnect_mount(p, how); - if (mnt_has_parent(p)) { mnt_add_count(p->mnt_parent, -1); if (!disconnect) { @@ -1471,10 +1470,11 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); } else { umount_mnt(p); - hlist_add_head(&p->mnt_umount, &unmounted); } } change_mnt_propagation(p, MS_PRIVATE); + if (disconnect) + hlist_add_head(&p->mnt_umount, &unmounted); } } diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 385f3aaa2448..90c830e3758e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int low_bucket = 0, bucket, high_bucket; struct ocfs2_xattr_bucket *search; - u32 last_hash; u64 blkno, lower_blkno = 0; search = ocfs2_xattr_bucket_new(inode); @@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, if (xh->xh_count) xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1]; - last_hash = le32_to_cpu(xe->xe_name_hash); - /* record lower_blkno which may be the insert place. */ lower_blkno = blkno; diff --git a/fs/open.c b/fs/open.c index b5b80469b93d..a59abe3c669a 100644 --- a/fs/open.c +++ b/fs/open.c @@ -374,6 +374,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode) override_cred->cap_permitted; } + /* + * The new set of credentials can *only* be used in + * task-synchronous circumstances, and does not need + * RCU freeing, unless somebody then takes a separate + * reference to it. + * + * NOTE! 
This is _only_ true because this credential + * is used purely for override_creds() that installs + * it as the subjective cred. Other threads will be + * accessing ->real_cred, not the subjective cred. + * + * If somebody _does_ make a copy of this (using the + * 'get_current_cred()' function), that will clear the + * non_rcu field, because now that other user may be + * expecting RCU freeing. But normal thread-synchronous + * cred accesses will keep things non-RCU. + */ + override_cred->non_rcu = 1; + old_cred = override_creds(override_cred); retry: res = user_path_at(dfd, filename, lookup_flags, &path); diff --git a/fs/super.c b/fs/super.c index 113c58f19425..5960578a4076 100644 --- a/fs/super.c +++ b/fs/super.c @@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super); bool mount_capable(struct fs_context *fc) { - struct user_namespace *user_ns = fc->global ? &init_user_ns - : fc->user_ns; - if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) return capable(CAP_SYS_ADMIN); else - return ns_capable(user_ns, CAP_SYS_ADMIN); + return ns_capable(fc->user_ns, CAP_SYS_ADMIN); } /** diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c index 94c4f1de1922..77ff9f97bcda 100644 --- a/fs/xfs/scrub/dabtree.c +++ b/fs/xfs/scrub/dabtree.c @@ -278,7 +278,11 @@ xchk_da_btree_block_check_sibling( /* Compare upper level pointer to sibling pointer. */ if (ds->state->altpath.blk[level].blkno != sibling) xchk_da_set_corrupt(ds, level); - xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp); + if (ds->state->altpath.blk[level].bp) { + xfs_trans_brelse(ds->dargs.trans, + ds->state->altpath.blk[level].bp); + ds->state->altpath.blk[level].bp = NULL; + } out: return error; } diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index a8a06bb78ea8..f5c955d35be4 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -272,6 +272,7 @@ xfs_bulkstat_to_bstat( struct xfs_bstat *bs1, const struct xfs_bulkstat *bstat) { + /* memset is needed here because of padding holes in the structure. */ memset(bs1, 0, sizeof(struct xfs_bstat)); bs1->bs_ino = bstat->bs_ino; bs1->bs_mode = bstat->bs_mode; @@ -388,6 +389,8 @@ xfs_inumbers_to_inogrp( struct xfs_inogrp *ig1, const struct xfs_inumbers *ig) { + /* memset is needed here because of padding holes in the structure. 
*/ + memset(ig1, 0, sizeof(struct xfs_inogrp)); ig1->xi_startino = ig->xi_startino; ig1->xi_alloccount = ig->xi_alloccount; ig1->xi_allocmask = ig->xi_allocmask; diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 8666fe7f35d7..02970b11f71f 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) { - int oldval = 0, ret; - - pagefault_disable(); - - switch (op) { - case FUTEX_OP_SET: - case FUTEX_OP_ADD: - case FUTEX_OP_OR: - case FUTEX_OP_ANDN: - case FUTEX_OP_XOR: - default: - ret = -ENOSYS; - } - - pagefault_enable(); - - if (!ret) - *oval = oldval; - - return ret; + return -ENOSYS; } static inline int diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h index c64bea7a52be..e9f20b813a69 100644 --- a/include/asm-generic/getorder.h +++ b/include/asm-generic/getorder.h @@ -7,24 +7,6 @@ #include <linux/compiler.h> #include <linux/log2.h> -/* - * Runtime evaluation of get_order() - */ -static inline __attribute_const__ -int __get_order(unsigned long size) -{ - int order; - - size--; - size >>= PAGE_SHIFT; -#if BITS_PER_LONG == 32 - order = fls(size); -#else - order = fls64(size); -#endif - return order; -} - /** * get_order - Determine the allocation order of a memory size * @size: The size for which to get the order @@ -43,19 +25,27 @@ int __get_order(unsigned long size) * to hold an object of the specified size. * * The result is undefined if the size is 0. - * - * This function may be used to initialise variables with compile time - * evaluations of constants. */ -#define get_order(n) \ -( \ - __builtin_constant_p(n) ? ( \ - ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \ - (((n) < (1UL << PAGE_SHIFT)) ? 
0 : \ - ilog2((n) - 1) - PAGE_SHIFT + 1) \ - ) : \ - __get_order(n) \ -) +static inline __attribute_const__ int get_order(unsigned long size) +{ + if (__builtin_constant_p(size)) { + if (!size) + return BITS_PER_LONG - PAGE_SHIFT; + + if (size < (1UL << PAGE_SHIFT)) + return 0; + + return ilog2((size) - 1) - PAGE_SHIFT + 1; + } + + size--; + size >>= PAGE_SHIFT; +#if BITS_PER_LONG == 32 + return fls(size); +#else + return fls64(size); +#endif +} #endif /* __ASSEMBLY__ */ diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index dcb95bd9dee6..55c3b123581b 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -34,7 +34,7 @@ enum i915_component_type { /* MAX_PORT is the number of port * It must be sync with I915_MAX_PORTS defined i915_drv.h */ -#define MAX_PORTS 6 +#define MAX_PORTS 9 /** * struct i915_audio_component - Used for direct communication between i915 and hda drivers diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7523e9a7b6e2..23274cf92712 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -30,11 +30,11 @@ #include <uapi/drm/i915_drm.h> /* For use by IPS driver */ -extern unsigned long i915_read_mch_val(void); -extern bool i915_gpu_raise(void); -extern bool i915_gpu_lower(void); -extern bool i915_gpu_busy(void); -extern bool i915_gpu_turbo_disable(void); +unsigned long i915_read_mch_val(void); +bool i915_gpu_raise(void); +bool i915_gpu_lower(void); +bool i915_gpu_busy(void); +bool i915_gpu_turbo_disable(void); /* Exported from arch/x86/kernel/early-quirks.c */ extern struct resource intel_graphics_stolen_res; @@ -109,6 +109,9 @@ enum port { PORT_D, PORT_E, PORT_F, + PORT_G, + PORT_H, + PORT_I, I915_MAX_PORTS }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 6d60ea68c171..a70c982ddff9 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -568,7 +568,8 @@ INTEL_VGA_DEVICE(0x8A56, info), \ INTEL_VGA_DEVICE(0x8A71, info), \ INTEL_VGA_DEVICE(0x8A70, info), \ - INTEL_VGA_DEVICE(0x8A53, info) + INTEL_VGA_DEVICE(0x8A53, info), \ + INTEL_VGA_DEVICE(0x8A54, info) #define INTEL_ICL_11_IDS(info) \ INTEL_ICL_PORT_F_IDS(info), \ @@ -582,4 +583,14 @@ INTEL_VGA_DEVICE(0x4551, info), \ INTEL_VGA_DEVICE(0x4541, info) +/* TGL */ +#define INTEL_TGL_12_IDS(info) \ + INTEL_VGA_DEVICE(0x9A49, info), \ + INTEL_VGA_DEVICE(0x9A40, info), \ + INTEL_VGA_DEVICE(0x9A59, info), \ + INTEL_VGA_DEVICE(0x9A60, info), \ + INTEL_VGA_DEVICE(0x9A68, info), \ + INTEL_VGA_DEVICE(0x9A70, info), \ + INTEL_VGA_DEVICE(0x9A78, info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 689a58231288..12811091fd50 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -181,6 +181,7 @@ struct blkcg_policy { extern struct blkcg blkcg_root; extern struct cgroup_subsys_state * const blkcg_root_css; +extern bool blkcg_debug_stats; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index feff3fe4467e..1b1fa1557e68 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -311,6 +311,7 @@ enum req_flag_bits { __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ + __REQ_NOWAIT_INLINE, /* Return would-block error inline */ /* * When a shared kthread needs to issue a bio for a cgroup, doing * so synchronously can lead to 
priority inversions as the kthread @@ -345,6 +346,7 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) +#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) @@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op) typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U +#define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { - return cookie != BLK_QC_T_NONE; + return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) diff --git a/include/linux/clk.h b/include/linux/clk.h index 3c096c7a51dc..853a8f181394 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, /** * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data * @clks: pointer to the clk_bulk_data table of consumer * * Behaves the same as devm_clk_bulk_get() except where there is no clock diff --git a/include/linux/connector.h b/include/linux/connector.h index 6b6c7396a584..cb732643471b 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -50,7 +50,6 @@ struct cn_dev { u32 seq, groups; struct sock *nls; - void (*input) (struct sk_buff *skb); struct cn_queue_dev *cbdev; }; diff --git a/include/linux/cred.h b/include/linux/cred.h index 7eb43a038330..f7a30e0099be 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -145,7 +145,11 @@ struct cred { struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ struct group_info *group_info; /* supplementary groups for euid/fsgid */ - struct rcu_head rcu; /* RCU deletion hook */ + /* RCU deletion */ + union { + int non_rcu; /* Can we skip RCU deletion? */ + struct rcu_head rcu; /* RCU deletion hook */ + }; } __randomize_layout; extern void __put_cred(struct cred *); @@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred) if (!cred) return cred; validate_creds(cred); + nonconst_cred->non_rcu = 0; return get_new_cred(nonconst_cred); } @@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) if (!atomic_inc_not_zero(&nonconst_cred->usage)) return NULL; validate_creds(cred); + nonconst_cred->non_rcu = 0; return cred; } diff --git a/include/linux/device.h b/include/linux/device.h index c330b75c6c57..6717adee33f0 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -915,6 +915,8 @@ struct dev_links_info { * This identifies the device type and carries type-specific * information. * @mutex: Mutex to synchronize calls to its driver. + * @lockdep_mutex: An optional debug lock that a subsystem can use as a + * peer lock to gain localized lockdep coverage of the device_lock. * @bus: Type of bus device is on. * @driver: Which driver has allocated this * @platform_data: Platform data specific to the device. 
@@ -998,6 +1000,9 @@ struct device { core doesn't touch it */ void *driver_data; /* Driver data, set and get with dev_set_drvdata/dev_get_drvdata */ +#ifdef CONFIG_PROVE_LOCKING + struct mutex lockdep_mutex; +#endif struct mutex mutex; /* mutex to synchronize calls to * its driver. */ @@ -1383,6 +1388,7 @@ extern int (*platform_notify_remove)(struct device *dev); */ extern struct device *get_device(struct device *dev); extern void put_device(struct device *dev); +extern bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS extern int devtmpfs_create_node(struct device *dev); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index e11b115dd0e4..f7d1eea32c78 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -689,8 +689,8 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) */ static inline bool dma_addressing_limited(struct device *dev) { - return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) < - dma_get_required_mask(dev); + return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) < + dma_get_required_mask(dev); } #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 17cd0078377c..1dd014c9c87b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -45,7 +45,6 @@ struct elevator_mq_ops { struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); void (*completed_request)(struct request *, u64); - void (*started_request)(struct request *); void (*requeue_request)(struct request *); struct request *(*former_request)(struct request_queue *, struct request *); struct request *(*next_request)(struct request_queue *, struct request *); diff --git a/include/linux/fs.h b/include/linux/fs.h index 56b8e358af5c..997a530ff4e9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder); extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); +extern struct block_device *bd_start_claiming(struct block_device *bdev, + void *holder); +extern void bd_finish_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); +extern void bd_abort_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); extern void blkdev_put(struct block_device *bdev, fmode_t mode); extern int __blkdev_reread_part(struct block_device *bdev); extern int blkdev_reread_part(struct block_device *bdev); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 9ddcf50a3c59..a7f08fb0f865 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_unhinge(struct device *dev, @@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void gpiod_put_array(struct gpio_descs *descs) @@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline struct gpio_desc *__must_check @@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) might_sleep(); /* GPIO 
can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_put_array(struct device *dev, @@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline int gpiod_get_direction(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_input(struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } @@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) static inline int gpiod_get_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value(unsigned int array_size, @@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value(unsigned int array_size, @@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value_cansleep(unsigned int array_size, @@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -441,13 +441,13 
@@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, @@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_to_irq(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio) static inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } diff --git a/include/linux/hmm.h b/include/linux/hmm.h index b8a08b2a10ca..7ef56dc18050 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range, */ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline bool hmm_vma_range_done(struct hmm_range *range) -{ - bool ret = hmm_range_valid(range); - - hmm_range_unregister(range); - return ret; -} - -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline int hmm_vma_fault(struct hmm_mirror *mirror, - struct hmm_range *range, bool block) -{ - long ret; - - /* - * With the old API the driver must set each individual entries with - * the requested flags (valid, write, ...). So here we set the mask to - * keep intact the entries provided by the driver and zero out the - * default_flags. 
- */ - range->default_flags = 0; - range->pfn_flags_mask = -1UL; - - ret = hmm_range_register(range, mirror, - range->start, range->end, - PAGE_SHIFT); - if (ret) - return (int)ret; - - if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { - /* - * The mmap_sem was taken by driver we release it here and - * returns -EAGAIN which correspond to mmap_sem have been - * drop in the old API. - */ - up_read(&range->vma->vm_mm->mmap_sem); - return -EAGAIN; - } - - ret = hmm_range_fault(range, block); - if (ret <= 0) { - if (ret == -EBUSY || !ret) { - /* Same as above, drop mmap_sem to match old API. */ - up_read(&range->vma->vm_mm->mmap_sem); - ret = -EBUSY; - } else if (ret == -EAGAIN) - ret = -EBUSY; - hmm_range_unregister(range); - return ret; - } - return 0; -} - /* Below are for HMM internal use only! Not to be used by device driver! */ static inline void hmm_mm_init(struct mm_struct *mm) { diff --git a/include/linux/iova.h b/include/linux/iova.h index 781b96ac706f..a0637abffee8 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); +bool has_iova_flush_queue(struct iova_domain *iovad); int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); @@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad, { } +static inline bool has_iova_flush_queue(struct iova_domain *iovad) +{ + return false; +} + static inline int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b2c1648f7e5d..5714fd35a83c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -814,6 +814,7 @@ struct tee_client_device_id { /** * struct wmi_device_id - WMI device identifier * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba + * @context: pointer to driver specific data */ struct wmi_device_id { const char guid_string[UUID_STRING_LEN+1]; diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h index 91d6275292a5..19df78341fb3 100644 --- a/include/linux/netfilter/nf_conntrack_h323_asn1.h +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /**************************************************************************** - * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323 - * conntrack/NAT module. + * BER and PER decoding library for H.323 conntrack/NAT module. * * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> * diff --git a/include/linux/of.h b/include/linux/of.h index 0cf857012f11..844f89e1b039 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np, } /** - * of_property_read_bool - Findfrom a property + * of_property_read_bool - Find a property * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. 
* diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 1dda31825ec4..71283739ffd2 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -32,6 +32,7 @@ #endif /* CONFIG_SPARSEMEM */ +#ifndef BUILD_VDSO32_64 /* * page->flags layout: * @@ -76,20 +77,22 @@ #define LAST_CPUPID_SHIFT 0 #endif -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_TAG_WIDTH 8 +#else +#define KASAN_TAG_WIDTH 0 +#endif + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ + <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif -#ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_TAG_WIDTH 8 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ > BITS_PER_LONG - NR_PAGEFLAGS -#error "KASAN: not enough bits in page flags for tag" -#endif -#else -#define KASAN_TAG_WIDTH 0 +#error "Not enough bits in page flags" #endif /* @@ -104,4 +107,5 @@ #define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif +#endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index b848517da64c..f91cb8898ff0 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -152,6 +152,8 @@ enum pageflags { PG_savepinned = PG_dirty, /* Has a grant mapping of another (foreign) domain's page. */ PG_foreign = PG_owner_priv_1, + /* Remapped by swiotlb-xen. */ + PG_xen_remapped = PG_owner_priv_1, /* SLOB */ PG_slob_free = PG_private, @@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); +PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) + TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) diff --git a/include/linux/sched.h b/include/linux/sched.h index 8dc1811487f5..9f51932bd543 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1092,7 +1092,15 @@ struct task_struct { u64 last_sum_exec_runtime; struct callback_head numa_work; - struct numa_group *numa_group; + /* + * This pointer is only modified for current in syscall and + * pagefault context (and for tasks being destroyed), so it can be read + * from any of the following contexts: + * - RCU read-side critical section + * - current->numa_group from everywhere + * - task's runqueue locked, task not running + */ + struct numa_group __rcu *numa_group; /* * numa_faults is an array split into four regions: diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h index e7dd04a84ba8..3988762efe15 100644 --- a/include/linux/sched/numa_balancing.h +++ b/include/linux/sched/numa_balancing.h @@ -19,7 +19,7 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); extern void set_numabalancing_state(bool enabled); -extern void task_numa_free(struct task_struct *p); +extern void task_numa_free(struct task_struct *p, bool final); extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, int src_nid, int dst_cpu); #else @@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p) static inline void set_numabalancing_state(bool enabled) { } -static inline void task_numa_free(struct 
task_struct *p) +static inline void task_numa_free(struct task_struct *p, bool final) { } static inline bool should_numa_migrate_memory(struct task_struct *p, diff --git a/include/linux/wait.h b/include/linux/wait.h index b6f77cf60dd7..30c515520fb2 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -127,6 +127,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head) } /** + * wq_has_single_sleeper - check if there is only one sleeper + * @wq_head: wait queue head + * + * Returns true of wq_head has only one sleeper on the list. + * + * Please refer to the comment for waitqueue_active. + */ +static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) +{ + return list_is_singular(&wq_head->head); +} + +/** * wq_has_sleeper - check if there are any waiting processes * @wq_head: wait queue head * diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 88c27153a4bc..45850a8391d9 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4170,7 +4170,7 @@ struct sta_opmode_info { u8 rx_nss; }; -#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)ERR_PTR(-ENODATA)) +#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA)) /** * struct wiphy_vendor_command - vendor command definition diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index db337299e81e..b16d21636d69 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -2,8 +2,8 @@ #define _NET_FLOW_OFFLOAD_H #include <linux/kernel.h> +#include <linux/list.h> #include <net/flow_dissector.h> -#include <net/sch_generic.h> struct flow_match { struct flow_dissector *dissector; @@ -249,6 +249,10 @@ enum flow_block_binder_type { FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, }; +struct flow_block { + struct list_head cb_list; +}; + struct netlink_ext_ack; struct flow_block_offload { @@ -256,29 +260,33 @@ struct flow_block_offload { enum flow_block_binder_type binder_type; bool block_shared; struct net *net; + struct flow_block *block; struct list_head cb_list; struct list_head *driver_block_list; struct netlink_ext_ack *extack; }; +enum tc_setup_type; +typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data, + void *cb_priv); + struct flow_block_cb { struct list_head driver_list; struct list_head list; - struct net *net; - tc_setup_cb_t *cb; + flow_setup_cb_t *cb; void *cb_ident; void *cb_priv; void (*release)(void *cb_priv); unsigned int refcnt; }; -struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, +struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv)); void flow_block_cb_free(struct flow_block_cb *block_cb); -struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload, - tc_setup_cb_t *cb, void *cb_ident); +struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block, + flow_setup_cb_t *cb, void *cb_ident); void *flow_block_cb_priv(struct flow_block_cb *block_cb); void flow_block_cb_incref(struct flow_block_cb *block_cb); @@ -296,11 +304,12 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb, list_move(&block_cb->list, &offload->cb_list); } -bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident, +bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, struct list_head *driver_block_list); int flow_block_cb_setup_simple(struct flow_block_offload *f, - struct list_head *driver_list, tc_setup_cb_t *cb, + struct list_head *driver_list, + flow_setup_cb_t *cb, void 
*cb_ident, void *cb_priv, bool ingress_only); enum flow_cls_command { @@ -333,4 +342,9 @@ flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd) return flow_cmd->rule; } +static inline void flow_block_init(struct flow_block *flow_block) +{ + INIT_LIST_HEAD(&flow_block->cb_list); +} + #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index 93ce6b0daaba..573429be4d59 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -76,6 +76,11 @@ struct nf_conntrack_expect_policy { #define NF_CT_EXPECT_CLASS_DEFAULT 0 #define NF_CT_EXPECT_MAX_CNT 255 +/* Allow to reuse expectations with the same tuples from different master + * conntracks. + */ +#define NF_CT_EXP_F_SKIP_MASTER 0x1 + int nf_conntrack_expect_pernet_init(struct net *net); void nf_conntrack_expect_pernet_fini(struct net *net); @@ -122,10 +127,11 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t, u_int8_t, const __be16 *, const __be16 *); void nf_ct_expect_put(struct nf_conntrack_expect *exp); int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, - u32 portid, int report); -static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect) + u32 portid, int report, unsigned int flags); +static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect, + unsigned int flags) { - return nf_ct_expect_related_report(expect, 0, 0); + return nf_ct_expect_related_report(expect, 0, 0, flags); } #endif /*_NF_CONNTRACK_EXPECT_H*/ diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h index 8f00125b06f4..44513b93bd55 100644 --- a/include/net/netfilter/nf_conntrack_synproxy.h +++ b/include/net/netfilter/nf_conntrack_synproxy.h @@ -68,6 +68,7 @@ struct synproxy_options { u8 options; u8 wscale; u16 mss; + u16 mss_encode; u32 tsval; u32 tsecr; }; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 35dfdd9f69b3..9b624566b82d 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -11,6 +11,7 @@ #include <linux/rhashtable.h> #include <net/netfilter/nf_flow_table.h> #include <net/netlink.h> +#include <net/flow_offload.h> struct module; @@ -951,7 +952,7 @@ struct nft_stats { * @stats: per-cpu chain stats * @chain: the chain * @dev_name: device name that this base chain is attached to (if any) - * @cb_list: list of flow block callbacks (for hardware offload) + * @flow_block: flow block (for hardware offload) */ struct nft_base_chain { struct nf_hook_ops ops; @@ -961,7 +962,7 @@ struct nft_base_chain { struct nft_stats __percpu *stats; struct nft_chain chain; char dev_name[IFNAMSIZ]; - struct list_head cb_list; + struct flow_block flow_block; }; static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 841faadceb6e..e429809ca90d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -6,7 +6,6 @@ #include <linux/workqueue.h> #include <net/sch_generic.h> #include <net/act_api.h> -#include <net/flow_offload.h> #include <net/net_namespace.h> /* TC action not accessible from user space */ @@ -126,14 +125,14 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) } static inline -int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb, +int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb, 
void *cb_priv) { return 0; } static inline -void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb, +void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb, void *cb_priv) { } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 855167bbc372..6b6b01234dd9 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -15,6 +15,7 @@ #include <linux/mutex.h> #include <net/gen_stats.h> #include <net/rtnetlink.h> +#include <net/flow_offload.h> struct Qdisc_ops; struct qdisc_walker; @@ -22,9 +23,6 @@ struct tcf_walker; struct module; struct bpf_flow_keys; -typedef int tc_setup_cb_t(enum tc_setup_type type, - void *type_data, void *cb_priv); - typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, enum tc_setup_type type, void *type_data); @@ -313,7 +311,7 @@ struct tcf_proto_ops { void (*walk)(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held); int (*reoffload)(struct tcf_proto *tp, bool add, - tc_setup_cb_t *cb, void *cb_priv, + flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack); void (*bind_class)(void *, u32, unsigned long); void * (*tmplt_create)(struct net *net, @@ -401,7 +399,7 @@ struct tcf_block { refcount_t refcnt; struct net *net; struct Qdisc *q; - struct list_head cb_list; + struct flow_block flow_block; struct list_head owner_list; bool keep_dst; unsigned int offloadcnt; /* Number of oddloaded filters */ diff --git a/include/net/tcp.h b/include/net/tcp.h index f42d300f0cfa..e5cf514ba118 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1709,6 +1709,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) return skb_rb_first(&sk->tcp_rtx_queue); } +static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) +{ + return skb_rb_last(&sk->tcp_rtx_queue); +} + static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) { return skb_peek(&sk->sk_write_queue); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c5f8a9f17063..4f225175cb91 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2647,7 +2647,9 @@ struct ib_client { const union ib_gid *gid, const struct sockaddr *addr, void *client_data); - struct list_head list; + + refcount_t uses; + struct completion uses_zero; u32 client_id; /* kverbs are not required by the client */ diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 0eeea520a853..e06c77d76463 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve( /** * rvt_qp_wqe_unreserve - clean reserved operation * @qp - the rvt qp - * @wqe - the send wqe + * @flags - send wqe flags * * This decrements the reserve use count. * @@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve( * the compiler does not juggle the order of the s_last * ring index and the decrementing of s_reserved_used. 
*/ -static inline void rvt_qp_wqe_unreserve( - struct rvt_qp *qp, - struct rvt_swqe *wqe) +static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags) { - if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) { + if (unlikely(flags & RVT_SEND_RESERVE_USED)) { atomic_dec(&qp->s_reserved_used); /* insure no compiler re-order up to s_last change */ smp_mb__after_atomic(); @@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp, u32 byte_len, last; int flags = wqe->wr.send_flags; + rvt_qp_wqe_unreserve(qp, flags); rvt_put_qp_swqe(qp, wqe); need_completion = diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 2d64b53f947c..9b87e1a1c646 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -115,7 +115,7 @@ struct fc_disc_port { struct fc_lport *lp; struct list_head peers; struct work_struct rport_work; - u32 port_id; + u32 port_id; }; /** @@ -155,14 +155,14 @@ struct fc_rport_operations { */ struct fc_rport_libfc_priv { struct fc_lport *local_port; - enum fc_rport_state rp_state; + enum fc_rport_state rp_state; u16 flags; #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) #define FC_RP_FLAGS_RETRY (1 << 1) #define FC_RP_STARTED (1 << 2) #define FC_RP_FLAGS_CONF_REQ (1 << 3) - unsigned int e_d_tov; - unsigned int r_a_tov; + unsigned int e_d_tov; + unsigned int r_a_tov; }; /** @@ -191,24 +191,24 @@ struct fc_rport_priv { struct fc_lport *local_port; struct fc_rport *rport; struct kref kref; - enum fc_rport_state rp_state; + enum fc_rport_state rp_state; struct fc_rport_identifiers ids; u16 flags; - u16 max_seq; + u16 max_seq; u16 disc_id; u16 maxframe_size; - unsigned int retries; - unsigned int major_retries; - unsigned int e_d_tov; - unsigned int r_a_tov; - struct mutex rp_mutex; + unsigned int retries; + unsigned int major_retries; + unsigned int e_d_tov; + unsigned int r_a_tov; + struct mutex rp_mutex; struct delayed_work retry_work; - enum fc_rport_event event; + enum fc_rport_event event; struct fc_rport_operations *ops; - struct list_head peers; - struct work_struct event_work; + struct list_head peers; + struct work_struct event_work; u32 supported_classes; - u16 prli_count; + u16 prli_count; struct rcu_head rcu; u16 sp_features; u8 spp_type; @@ -618,12 +618,12 @@ struct libfc_function_template { * @disc_callback: Callback routine called when discovery completes */ struct fc_disc { - unsigned char retry_count; - unsigned char pending; - unsigned char requested; - unsigned short seq_count; - unsigned char buf_len; - u16 disc_id; + unsigned char retry_count; + unsigned char pending; + unsigned char requested; + unsigned short seq_count; + unsigned char buf_len; + u16 disc_id; struct list_head rports; void *priv; @@ -697,7 +697,7 @@ struct fc_lport { struct fc_rport_priv *ms_rdata; struct fc_rport_priv *ptp_rdata; void *scsi_priv; - struct fc_disc disc; + struct fc_disc disc; /* Virtual port information */ struct list_head vports; @@ -715,7 +715,7 @@ struct fc_lport { u8 retry_count; /* Fabric information */ - u32 port_id; + u32 port_id; u64 wwpn; u64 wwnn; unsigned int service_params; @@ -743,11 +743,11 @@ struct fc_lport { struct fc_ns_fts fcts; /* Miscellaneous */ - struct mutex lp_mutex; - struct list_head list; + struct mutex lp_mutex; + struct list_head list; struct delayed_work retry_work; void *prov[FC_FC4_PROV_SIZE]; - struct list_head lport_list; + struct list_head lport_list; }; /** diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index c50fb297e265..2568cb0627ec 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -31,7 
+31,7 @@ * FIP tunable parameters. */ #define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */ -#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */ +#define FCOE_CTLR_SOL_TOV 2000 /* min. solicitation interval (mS) */ #define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */ #define FCOE_CTLR_VN2VN_LOGIN_LIMIT 3 /* max. VN2VN rport login retries */ @@ -229,6 +229,7 @@ struct fcoe_fcf { * @vn_mac: VN_Node assigned MAC address for data */ struct fcoe_rport { + struct fc_rport_priv rdata; unsigned long time; u16 fcoe_len; u16 flags; diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 3f9d6b6a5691..c1036d16ed03 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -259,7 +259,7 @@ static inline int qe_alive_during_sleep(void) /* Structure that defines QE firmware binary files. * - * See Documentation/powerpc/qe_firmware.txt for a description of these + * See Documentation/powerpc/qe_firmware.rst for a description of these * fields. */ struct qe_firmware { diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index c5188ff724d1..bc88d6f964da 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -173,10 +173,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) if (snd_BUG_ON(!stream)) return; - if (stream->direction == SND_COMPRESS_PLAYBACK) - stream->runtime->state = SNDRV_PCM_STATE_SETUP; - else - stream->runtime->state = SNDRV_PCM_STATE_PREPARED; + stream->runtime->state = SNDRV_PCM_STATE_SETUP; wake_up(&stream->runtime->sleep); } diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h index bded69e696d4..6080ea0facd7 100644 --- a/include/sound/sof/control.h +++ b/include/sound/sof/control.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index 4bb8ee138ba7..65e4c20e567c 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index 3d174e20aa53..5b8de1b1983c 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h index 12867bbd4372..10f00c08dbb7 100644 --- a/include/sound/sof/header.h +++ b/include/sound/sof/header.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h index 16528d2b4a50..a9156b4a062c 100644 --- a/include/sound/sof/info.h +++ b/include/sound/sof/info.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h index 8ae3ad45bdf7..003879401d63 100644 --- a/include/sound/sof/pm.h +++ b/include/sound/sof/pm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h index 643f175cb479..0b71b381b952 100644 --- a/include/sound/sof/stream.h +++ b/include/sound/sof/stream.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h index 41dcabf89899..c47b36240920 100644 --- a/include/sound/sof/topology.h +++ b/include/sound/sof/topology.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h index 9257d5473d97..fda6e8f6ead4 100644 --- a/include/sound/sof/trace.h +++ b/include/sound/sof/trace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h index d25c764b10e8..dd53d36b34e1 100644 --- a/include/sound/sof/xtensa.h +++ b/include/sound/sof/xtensa.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 2212adda8f77..64e92d56c6a8 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -2,7 +2,7 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM dma_fence -#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_DMA_FENCE_H #include <linux/tracepoint.h> diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index f3a12566bed0..6678cf8b235b 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h @@ -3,7 +3,7 @@ #define TRACE_SYSTEM napi #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_NAPI_H_ +#define _TRACE_NAPI_H #include <linux/netdevice.h> #include <linux/tracepoint.h> @@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll, #undef NO_DEV -#endif /* _TRACE_NAPI_H_ */ +#endif /* _TRACE_NAPI_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 60d0d8bd336d..0d1a9ebf55ba 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -2,7 +2,7 @@ #define TRACE_SYSTEM qdisc #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_QDISC_H_ +#define _TRACE_QDISC_H #include <linux/skbuff.h> #include <linux/netdevice.h> @@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); -#endif /* _TRACE_QDISC_H_ */ +#endif /* _TRACE_QDISC_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h index 0818f6286110..971cd02d2daf 100644 --- a/include/trace/events/tegra_apb_dma.h +++ b/include/trace/events/tegra_apb_dma.h @@ -1,5 +1,5 @@ #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_TEGRA_APM_DMA_H +#define _TRACE_TEGRA_APB_DMA_H #include <linux/tracepoint.h> #include <linux/dmaengine.h> @@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr, TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq) ); -#endif /* _TRACE_TEGRADMA_H */ +#endif /* _TRACE_TEGRA_APB_DMA_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 328d05e77d9f..469dc512cca3 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait { #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) +#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) #define I915_PARAM_HUC_STATUS 42 diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h index 2ec3cc99ea4c..cbc1f5813f50 100644 --- a/include/uapi/linux/bpfilter.h +++ b/include/uapi/linux/bpfilter.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_BPFILTER_H #define _UAPI_LINUX_BPFILTER_H diff --git a/include/uapi/linux/ipmi_bmc.h b/include/uapi/linux/ipmi_bmc.h index 1670f0944227..782a03eb1086 100644 --- a/include/uapi/linux/ipmi_bmc.h +++ b/include/uapi/linux/ipmi_bmc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (c) 2015-2018, Intel Corporation. 
*/ diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h index d10b832c58c5..0a52b7b093d3 100644 --- a/include/uapi/linux/isst_if.h +++ b/include/uapi/linux/isst_if.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Intel Speed Select Interface: OS to hardware Interface * Copyright (c) 2019, Intel Corporation. diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a7c19540ce21..5e3f12d5359e 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -116,7 +116,7 @@ struct kvm_irq_level { * ACPI gsi notion of irq. * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. - * For ARM: See Documentation/virtual/kvm/api.txt + * For ARM: See Documentation/virt/kvm/api.txt */ union { __u32 irq; @@ -1086,7 +1086,7 @@ struct kvm_xen_hvm_config { * * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies * the irqfd to operate in resampling mode for level triggered interrupt - * emulation. See Documentation/virtual/kvm/api.txt. + * emulation. See Documentation/virt/kvm/api.txt. */ #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) diff --git a/include/uapi/linux/netfilter/nf_synproxy.h b/include/uapi/linux/netfilter/nf_synproxy.h index 6f3791c8946f..00d787f0260e 100644 --- a/include/uapi/linux/netfilter/nf_synproxy.h +++ b/include/uapi/linux/netfilter/nf_synproxy.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _NF_SYNPROXY_H #define _NF_SYNPROXY_H diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 75758ec26c8b..beb9a9d0c00a 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2863,7 +2863,7 @@ enum nl80211_attrs { #define NL80211_HT_CAPABILITY_LEN 26 #define NL80211_VHT_CAPABILITY_LEN 12 #define NL80211_HE_MIN_CAPABILITY_LEN 16 -#define NL80211_HE_MAX_CAPABILITY_LEN 51 +#define NL80211_HE_MAX_CAPABILITY_LEN 54 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 8654b2442f6a..592a0c1b77c9 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Userspace interface for AMD Secure Encrypted Virtualization (SEV) * platform management commands. diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h index 782069dcf607..4accfa7e266d 100644 --- a/include/uapi/linux/rxrpc.h +++ b/include/uapi/linux/rxrpc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ /* Types and definitions for AF_RXRPC. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
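[Editorial note: the run of SPDX-only hunks above and below follows the usual kernel licensing convention: headers exported to user space under include/uapi/ carry the Linux-syscall-note exception (so user programs that include them via normal system-call use are not treated as derived works), while kernel-internal headers, such as the include/sound/sof/ ones earlier in this series, omit it. A minimal illustration with hypothetical header names, not part of this series:

/* hypothetical include/uapi/linux/example.h -- exported to user space */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */

/* hypothetical include/linux/example.h -- kernel-internal only */
/* SPDX-License-Identifier: GPL-2.0 */

End of editorial note.]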
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 5642c05e0da0..3cc3af1c2ee1 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -150,9 +150,6 @@ #define PORT_PNX8XXX 70 -/* Hilscher netx */ -#define PORT_NETX 71 - /* SUN4V Hypervisor Console */ #define PORT_SUNHV 72 diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h index 3c9ee3020cbb..652f169a019e 100644 --- a/include/uapi/linux/usb/g_uvc.h +++ b/include/uapi/linux/usb/g_uvc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * g_uvc.h -- USB Video Class Gadget driver API * diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h index 26f39816af14..c27289fd619a 100644 --- a/include/uapi/linux/vbox_vmmdev_types.h +++ b/include/uapi/linux/vbox_vmmdev_types.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */ /* * Virtual Device for Guest <-> VMM/Host communication, type definitions * which are also used for the vboxguest ioctl interface / by vboxsf diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h index 612f0c7d3558..9cec58a6a5ea 100644 --- a/include/uapi/linux/vboxguest.h +++ b/include/uapi/linux/vboxguest.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */ /* * VBoxGuest - VirtualBox Guest Additions Driver Interface. * diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 9d9705ceda76..2427bc4d8eba 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -518,7 +518,13 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */ #define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */ #define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */ -#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ + +/* + * Originally this had 'BA12' as fourcc, but this clashed with the older + * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc. + * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444. + */ +#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ #define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */ #define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ #define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */ diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h index ba1b460c9944..237e36a280cb 100644 --- a/include/uapi/linux/virtio_iommu.h +++ b/include/uapi/linux/virtio_iommu.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* - * Virtio-iommu definition v0.9 + * Virtio-iommu definition v0.12 * - * Copyright (C) 2018 Arm Ltd. + * Copyright (C) 2019 Arm Ltd. 
*/ #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H #define _UAPI_LINUX_VIRTIO_IOMMU_H @@ -11,26 +11,31 @@ /* Feature bits */ #define VIRTIO_IOMMU_F_INPUT_RANGE 0 -#define VIRTIO_IOMMU_F_DOMAIN_BITS 1 +#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1 #define VIRTIO_IOMMU_F_MAP_UNMAP 2 #define VIRTIO_IOMMU_F_BYPASS 3 #define VIRTIO_IOMMU_F_PROBE 4 +#define VIRTIO_IOMMU_F_MMIO 5 -struct virtio_iommu_range { - __u64 start; - __u64 end; +struct virtio_iommu_range_64 { + __le64 start; + __le64 end; +}; + +struct virtio_iommu_range_32 { + __le32 start; + __le32 end; }; struct virtio_iommu_config { /* Supported page sizes */ - __u64 page_size_mask; + __le64 page_size_mask; /* Supported IOVA range */ - struct virtio_iommu_range input_range; + struct virtio_iommu_range_64 input_range; /* Max domain ID size */ - __u8 domain_bits; - __u8 padding[3]; + struct virtio_iommu_range_32 domain_range; /* Probe buffer size */ - __u32 probe_size; + __le32 probe_size; }; /* Request types */ @@ -49,6 +54,7 @@ struct virtio_iommu_config { #define VIRTIO_IOMMU_S_RANGE 0x05 #define VIRTIO_IOMMU_S_NOENT 0x06 #define VIRTIO_IOMMU_S_FAULT 0x07 +#define VIRTIO_IOMMU_S_NOMEM 0x08 struct virtio_iommu_req_head { __u8 type; @@ -78,12 +84,10 @@ struct virtio_iommu_req_detach { #define VIRTIO_IOMMU_MAP_F_READ (1 << 0) #define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1) -#define VIRTIO_IOMMU_MAP_F_EXEC (1 << 2) -#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 3) +#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2) #define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \ VIRTIO_IOMMU_MAP_F_WRITE | \ - VIRTIO_IOMMU_MAP_F_EXEC | \ VIRTIO_IOMMU_MAP_F_MMIO) struct virtio_iommu_req_map { diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h index 9a63ed6d062f..b022787ffb94 100644 --- a/include/uapi/linux/virtio_pmem.h +++ b/include/uapi/linux/virtio_pmem.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ /* * Definitions for virtio-pmem devices. * diff --git a/include/uapi/linux/vmcore.h b/include/uapi/linux/vmcore.h index 022619668e0e..3e9da91866ff 100644 --- a/include/uapi/linux/vmcore.h +++ b/include/uapi/linux/vmcore.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_VMCORE_H #define _UAPI_VMCORE_H diff --git a/include/uapi/linux/wmi.h b/include/uapi/linux/wmi.h index c36f2d7675a4..7085c5dca9fa 100644 --- a/include/uapi/linux/wmi.h +++ b/include/uapi/linux/wmi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * User API methods for ACPI-WMI mapping driver * diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h index 6d701af9fc42..fb792e882cef 100644 --- a/include/uapi/misc/fastrpc.h +++ b/include/uapi/misc/fastrpc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __QCOM_FASTRPC_H__ #define __QCOM_FASTRPC_H__ diff --git a/include/uapi/rdma/rvt-abi.h b/include/uapi/rdma/rvt-abi.h index 7328293c715c..7c05a02d2be5 100644 --- a/include/uapi/rdma/rvt-abi.h +++ b/include/uapi/rdma/rvt-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* * This file contains defines, structures, etc. 
that are used diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h index 3dd8071ace7b..7de68f1dc707 100644 --- a/include/uapi/rdma/siw-abi.h +++ b/include/uapi/rdma/siw-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */ /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h index 17c7abd0803a..9988db6ad244 100644 --- a/include/uapi/scsi/scsi_bsg_ufs.h +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * UFS Transport SGIO v4 BSG Message Support * diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index f39352cef382..9eee32f5e407 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * skl-tplg-interface.h - Intel DSP FW private data interface * diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 4969817124a8..98b30c1613b2 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, } #endif +int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, + unsigned long len); + /* * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn * @vma: VMA to map the pages into diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index fc020c09b7e8..deff97217496 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -35,10 +35,10 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. -config PREEMPT_LL +config PREEMPT bool "Preemptible Kernel (Low-Latency Desktop)" depends on !ARCH_NO_PREEMPT - select PREEMPT + select PREEMPTION select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK help This option reduces the latency of the kernel by making @@ -58,7 +58,7 @@ config PREEMPT_LL config PREEMPT_RT bool "Fully Preemptible Kernel (Real-Time)" depends on EXPERT && ARCH_SUPPORTS_RT - select PREEMPT + select PREEMPTION help This option turns the kernel into a real-time kernel by replacing various locking primitives (spinlocks, rwlocks, etc.) 
with @@ -77,6 +77,6 @@ endchoice config PREEMPT_COUNT bool -config PREEMPT +config PREEMPTION bool select PREEMPT_COUNT diff --git a/kernel/Makefile b/kernel/Makefile index a8d923b5481b..ef0d95a190b4 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -111,7 +111,6 @@ obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o obj-$(CONFIG_TORTURE_TEST) += torture.o obj-$(CONFIG_HAS_IOMEM) += iomem.o -obj-$(CONFIG_ZONE_DEVICE) += memremap.o obj-$(CONFIG_RSEQ) += rseq.o obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o diff --git a/kernel/cred.c b/kernel/cred.c index f9a0ce66c9c3..c0a4c12d38b2 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -144,7 +144,10 @@ void __put_cred(struct cred *cred) BUG_ON(cred == current->cred); BUG_ON(cred == current->real_cred); - call_rcu(&cred->rcu, put_cred_rcu); + if (cred->non_rcu) + put_cred_rcu(&cred->rcu); + else + call_rcu(&cred->rcu, put_cred_rcu); } EXPORT_SYMBOL(__put_cred); @@ -261,6 +264,7 @@ struct cred *prepare_creds(void) old = task->cred; memcpy(new, old, sizeof(struct cred)); + new->non_rcu = 0; atomic_set(&new->usage, 1); set_cred_subscribers(new, 0); get_group_info(new->group_info); @@ -544,7 +548,19 @@ const struct cred *override_creds(const struct cred *new) validate_creds(old); validate_creds(new); - get_cred(new); + + /* + * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'. + * + * That means that we do not clear the 'non_rcu' flag, since + * we are only installing the cred into the thread-synchronous + * '->cred' pointer, not the '->real_cred' pointer that is + * visible to other threads under RCU. + * + * Also note that we did validate_creds() manually, not depending + * on the validation in 'get_cred()'. + */ + get_new_cred((struct cred *)new); alter_cred_subscribers(new, 1); rcu_assign_pointer(current->cred, new); alter_cred_subscribers(old, -1); @@ -681,6 +697,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) validate_creds(old); *new = *old; + new->non_rcu = 0; atomic_set(&new->usage, 1); set_cred_subscribers(new, 0); get_uid(new->user); diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index bfc0c17f2a3d..2bd410f934b3 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -243,8 +243,9 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) /* CMA can be used only in the context which permits sleeping */ if (cma && gfpflags_allow_blocking(gfp)) { - align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); - page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN); + size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); + + page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN); } /* Fallback allocation of normal pages */ @@ -266,7 +267,8 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) */ void dma_free_contiguous(struct device *dev, struct page *page, size_t size) { - if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT)) + if (!cma_release(dev_get_cma_area(dev), page, + PAGE_ALIGN(size) >> PAGE_SHIFT)) __free_pages(page, get_order(size)); } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 1f628e7ac709..b945239621d8 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, int ret; if (!dev_is_dma_coherent(dev)) { + unsigned long pfn; + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) return -ENXIO; - page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr, - dma_addr)); + /* If the PFN is not 
valid, we do not have a struct page */ + pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); + if (!pfn_valid(pfn)) + return -ENXIO; + page = pfn_to_page(pfn); } else { page = virt_to_page(cpu_addr); } @@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, if (!dev_is_dma_coherent(dev)) { if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) return -ENXIO; + + /* If the PFN is not valid, we do not have a struct page */ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); + if (!pfn_valid(pfn)) + return -ENXIO; } else { pfn = page_to_pfn(virt_to_page(cpu_addr)); } diff --git a/kernel/events/core.c b/kernel/events/core.c index 026a14541a38..0463c1151bae 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -11274,7 +11274,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, goto err_unlock; } - perf_install_in_context(ctx, event, cpu); + perf_install_in_context(ctx, event, event->cpu); perf_unpin_context(ctx); mutex_unlock(&ctx->mutex); diff --git a/kernel/exit.c b/kernel/exit.c index a75b6a7f458a..5b4a5dcce8f8 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -720,6 +720,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead) if (group_dead) kill_orphaned_pgrp(tsk->group_leader, NULL); + tsk->exit_state = EXIT_ZOMBIE; if (unlikely(tsk->ptrace)) { int sig = thread_group_leader(tsk) && thread_group_empty(tsk) && @@ -733,9 +734,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead) autoreap = true; } - tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE; - if (tsk->exit_state == EXIT_DEAD) + if (autoreap) { + tsk->exit_state = EXIT_DEAD; list_add(&tsk->ptrace_entry, &dead); + } /* mt-exec, de_thread() is waiting for group leader */ if (unlikely(tsk->signal->notify_count < 0)) diff --git a/kernel/fork.c b/kernel/fork.c index d8ae0f1b4148..2852d0e76ea3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -726,7 +726,7 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(tsk == current); cgroup_free(tsk); - task_numa_free(tsk); + task_numa_free(tsk, true); security_task_free(tsk); exit_creds(tsk); delayacct_tsk_free(tsk); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 341f52117f88..4861cf8e274b 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -448,7 +448,7 @@ static void print_lockdep_off(const char *bug_msg) unsigned long nr_stack_trace_entries; -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) +#ifdef CONFIG_PROVE_LOCKING /* * Stack-trace: tightly packed array of stack backtrace * addresses. Protected by the graph_lock. 
@@ -491,7 +491,7 @@ unsigned int max_lockdep_depth; DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); #endif -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) +#ifdef CONFIG_PROVE_LOCKING /* * Locking printouts: */ @@ -2969,7 +2969,7 @@ static void check_chain_key(struct task_struct *curr) #endif } -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) +#ifdef CONFIG_PROVE_LOCKING static int mark_lock(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit); @@ -3608,7 +3608,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, return ret; } -#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ +#else /* CONFIG_PROVE_LOCKING */ static inline int mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) @@ -3627,7 +3627,7 @@ static inline int separate_irq_context(struct task_struct *curr, return 0; } -#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ +#endif /* CONFIG_PROVE_LOCKING */ /* * Initialize a lock instance's lock-class mapping info: @@ -4321,8 +4321,7 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie */ static void check_flags(unsigned long flags) { -#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ - defined(CONFIG_TRACE_IRQFLAGS) +#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) if (!debug_locks) return; diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 65b6a1600c8f..bda006f8a88b 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m) static int lockdep_stats_show(struct seq_file *m, void *v) { - struct lock_class *class; unsigned long nr_unused = 0, nr_uncategorized = 0, nr_irq_safe = 0, nr_irq_unsafe = 0, nr_softirq_safe = 0, nr_softirq_unsafe = 0, @@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v) sum_forward_deps = 0; #ifdef CONFIG_PROVE_LOCKING + struct lock_class *class; + list_for_each_entry(class, &all_lock_classes, lock_entry) { if (class->usage_mask == 0) diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index edd1c082dbf5..5e069734363c 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -908,6 +908,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, might_sleep(); +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(lock->magic != lock); +#endif + ww = container_of(lock, struct ww_mutex, base); if (use_ww_ctx && ww_ctx) { if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) @@ -1379,8 +1383,13 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, */ int __sched mutex_trylock(struct mutex *lock) { - bool locked = __mutex_trylock(lock); + bool locked; + +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(lock->magic != lock); +#endif + locked = __mutex_trylock(lock); if (locked) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 37524a47f002..bd0f0d05724c 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -666,7 +666,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem, preempt_disable(); rcu_read_lock(); owner = rwsem_owner_flags(sem, &flags); - if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner))) + /* + * Don't check the read-owner as the entry may be stale. 
+ */ + if ((flags & nonspinnable) || + (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner))) ret = false; rcu_read_unlock(); preempt_enable(); @@ -1000,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state) atomic_long_add(-RWSEM_READER_BIAS, &sem->count); adjustment = 0; if (rwsem_optimistic_spin(sem, false)) { + /* rwsem_optimistic_spin() implies ACQUIRE on success */ /* * Wake up other readers in the wait list if the front * waiter is a reader. @@ -1014,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state) } return sem; } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) { + /* rwsem_reader_phase_trylock() implies ACQUIRE on success */ return sem; } @@ -1032,6 +1038,8 @@ queue: */ if (adjustment && !(atomic_long_read(&sem->count) & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) { + /* Provide lock ACQUIRE */ + smp_acquire__after_ctrl_dep(); raw_spin_unlock_irq(&sem->wait_lock); rwsem_set_reader_owned(sem); lockevent_inc(rwsem_rlock_fast); @@ -1065,15 +1073,18 @@ queue: wake_up_q(&wake_q); /* wait to be given the lock */ - while (true) { + for (;;) { set_current_state(state); - if (!waiter.task) + if (!smp_load_acquire(&waiter.task)) { + /* Matches rwsem_mark_wake()'s smp_store_release(). */ break; + } if (signal_pending_state(state, current)) { raw_spin_lock_irq(&sem->wait_lock); if (waiter.task) goto out_nolock; raw_spin_unlock_irq(&sem->wait_lock); + /* Ordered by sem->wait_lock against rwsem_mark_wake(). */ break; } schedule(); @@ -1083,6 +1094,7 @@ queue: __set_current_state(TASK_RUNNING); lockevent_inc(rwsem_rlock); return sem; + out_nolock: list_del(&waiter.list); if (list_empty(&sem->wait_list)) { @@ -1123,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) /* do optimistic spinning and steal lock if possible */ if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) && - rwsem_optimistic_spin(sem, true)) + rwsem_optimistic_spin(sem, true)) { + /* rwsem_optimistic_spin() implies ACQUIRE on success */ return sem; + } /* * Disable reader optimistic spinning for this rwsem after @@ -1184,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) wait: /* wait until we successfully acquire the lock */ set_current_state(state); - while (true) { - if (rwsem_try_write_lock(sem, wstate)) + for (;;) { + if (rwsem_try_write_lock(sem, wstate)) { + /* rwsem_try_write_lock() implies ACQUIRE on success */ break; + } raw_spin_unlock_irq(&sem->wait_lock); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 036be95a87e9..bc9cfeaac8bd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1086,6 +1086,21 @@ struct numa_group { unsigned long faults[0]; }; +/* + * For functions that can be called in multiple contexts that permit reading + * ->numa_group (see struct task_struct for locking rules). 
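The rwsem reader slow path above now re-reads waiter.task with smp_load_acquire(), pairing with the smp_store_release() in rwsem_mark_wake(), so a woken reader cannot observe the wakeup before the waker's earlier stores. A generic sketch of that release/acquire handoff, using made-up names (not the rwsem code itself):

#include <asm/barrier.h>
#include <asm/processor.h>

struct handoff {
	int payload;
	int done;
};

/* Producer: publish the payload, then flag completion with release semantics. */
static void handoff_complete(struct handoff *h, int value)
{
	h->payload = value;
	smp_store_release(&h->done, 1);
}

/* Consumer: the acquire load of ->done orders the subsequent payload read. */
static int handoff_wait(struct handoff *h)
{
	while (!smp_load_acquire(&h->done))
		cpu_relax();

	return h->payload;
}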
+ */ +static struct numa_group *deref_task_numa_group(struct task_struct *p) +{ + return rcu_dereference_check(p->numa_group, p == current || + (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu))); +} + +static struct numa_group *deref_curr_numa_group(struct task_struct *p) +{ + return rcu_dereference_protected(p->numa_group, p == current); +} + static inline unsigned long group_faults_priv(struct numa_group *ng); static inline unsigned long group_faults_shared(struct numa_group *ng); @@ -1129,10 +1144,12 @@ static unsigned int task_scan_start(struct task_struct *p) { unsigned long smin = task_scan_min(p); unsigned long period = smin; + struct numa_group *ng; /* Scale the maximum scan period with the amount of shared memory. */ - if (p->numa_group) { - struct numa_group *ng = p->numa_group; + rcu_read_lock(); + ng = rcu_dereference(p->numa_group); + if (ng) { unsigned long shared = group_faults_shared(ng); unsigned long private = group_faults_priv(ng); @@ -1140,6 +1157,7 @@ static unsigned int task_scan_start(struct task_struct *p) period *= shared + 1; period /= private + shared + 1; } + rcu_read_unlock(); return max(smin, period); } @@ -1148,13 +1166,14 @@ static unsigned int task_scan_max(struct task_struct *p) { unsigned long smin = task_scan_min(p); unsigned long smax; + struct numa_group *ng; /* Watch for min being lower than max due to floor calculations */ smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); /* Scale the maximum scan period with the amount of shared memory. */ - if (p->numa_group) { - struct numa_group *ng = p->numa_group; + ng = deref_curr_numa_group(p); + if (ng) { unsigned long shared = group_faults_shared(ng); unsigned long private = group_faults_priv(ng); unsigned long period = smax; @@ -1186,7 +1205,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) p->numa_scan_period = sysctl_numa_balancing_scan_delay; p->numa_work.next = &p->numa_work; p->numa_faults = NULL; - p->numa_group = NULL; + RCU_INIT_POINTER(p->numa_group, NULL); p->last_task_numa_placement = 0; p->last_sum_exec_runtime = 0; @@ -1233,7 +1252,16 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p) pid_t task_numa_group_id(struct task_struct *p) { - return p->numa_group ? 
p->numa_group->gid : 0; + struct numa_group *ng; + pid_t gid = 0; + + rcu_read_lock(); + ng = rcu_dereference(p->numa_group); + if (ng) + gid = ng->gid; + rcu_read_unlock(); + + return gid; } /* @@ -1258,11 +1286,13 @@ static inline unsigned long task_faults(struct task_struct *p, int nid) static inline unsigned long group_faults(struct task_struct *p, int nid) { - if (!p->numa_group) + struct numa_group *ng = deref_task_numa_group(p); + + if (!ng) return 0; - return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] + - p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)]; + return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + + ng->faults[task_faults_idx(NUMA_MEM, nid, 1)]; } static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) @@ -1400,12 +1430,13 @@ static inline unsigned long task_weight(struct task_struct *p, int nid, static inline unsigned long group_weight(struct task_struct *p, int nid, int dist) { + struct numa_group *ng = deref_task_numa_group(p); unsigned long faults, total_faults; - if (!p->numa_group) + if (!ng) return 0; - total_faults = p->numa_group->total_faults; + total_faults = ng->total_faults; if (!total_faults) return 0; @@ -1419,7 +1450,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid, bool should_numa_migrate_memory(struct task_struct *p, struct page * page, int src_nid, int dst_cpu) { - struct numa_group *ng = p->numa_group; + struct numa_group *ng = deref_curr_numa_group(p); int dst_nid = cpu_to_node(dst_cpu); int last_cpupid, this_cpupid; @@ -1600,13 +1631,14 @@ static bool load_too_imbalanced(long src_load, long dst_load, static void task_numa_compare(struct task_numa_env *env, long taskimp, long groupimp, bool maymove) { + struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); struct rq *dst_rq = cpu_rq(env->dst_cpu); + long imp = p_ng ? groupimp : taskimp; struct task_struct *cur; long src_load, dst_load; - long load; - long imp = env->p->numa_group ? groupimp : taskimp; - long moveimp = imp; int dist = env->dist; + long moveimp = imp; + long load; if (READ_ONCE(dst_rq->numa_migrate_on)) return; @@ -1645,21 +1677,22 @@ static void task_numa_compare(struct task_numa_env *env, * If dst and source tasks are in the same NUMA group, or not * in any group then look only at task weights. */ - if (cur->numa_group == env->p->numa_group) { + cur_ng = rcu_dereference(cur->numa_group); + if (cur_ng == p_ng) { imp = taskimp + task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist); /* * Add some hysteresis to prevent swapping the * tasks within a group over tiny differences. */ - if (cur->numa_group) + if (cur_ng) imp -= imp / 16; } else { /* * Compare the group weights. If a task is all by itself * (not part of a group), use the task weight instead. */ - if (cur->numa_group && env->p->numa_group) + if (cur_ng && p_ng) imp += group_weight(cur, env->src_nid, dist) - group_weight(cur, env->dst_nid, dist); else @@ -1757,11 +1790,12 @@ static int task_numa_migrate(struct task_struct *p) .best_imp = 0, .best_cpu = -1, }; + unsigned long taskweight, groupweight; struct sched_domain *sd; + long taskimp, groupimp; + struct numa_group *ng; struct rq *best_rq; - unsigned long taskweight, groupweight; int nid, ret, dist; - long taskimp, groupimp; /* * Pick the lowest SD_NUMA domain, as that would have the smallest @@ -1807,7 +1841,8 @@ static int task_numa_migrate(struct task_struct *p) * multiple NUMA nodes; in order to better consolidate the group, * we need to check other locations. 
*/ - if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) { + ng = deref_curr_numa_group(p); + if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { for_each_online_node(nid) { if (nid == env.src_nid || nid == p->numa_preferred_nid) continue; @@ -1840,7 +1875,7 @@ static int task_numa_migrate(struct task_struct *p) * A task that migrated to a second choice node will be better off * trying for a better one later. Do not set the preferred node here. */ - if (p->numa_group) { + if (ng) { if (env.best_cpu == -1) nid = env.src_nid; else @@ -2135,6 +2170,7 @@ static void task_numa_placement(struct task_struct *p) unsigned long total_faults; u64 runtime, period; spinlock_t *group_lock = NULL; + struct numa_group *ng; /* * The p->mm->numa_scan_seq field gets updated without @@ -2152,8 +2188,9 @@ static void task_numa_placement(struct task_struct *p) runtime = numa_get_avg_runtime(p, &period); /* If the task is part of a group prevent parallel updates to group stats */ - if (p->numa_group) { - group_lock = &p->numa_group->lock; + ng = deref_curr_numa_group(p); + if (ng) { + group_lock = &ng->lock; spin_lock_irq(group_lock); } @@ -2194,7 +2231,7 @@ static void task_numa_placement(struct task_struct *p) p->numa_faults[cpu_idx] += f_diff; faults += p->numa_faults[mem_idx]; p->total_numa_faults += diff; - if (p->numa_group) { + if (ng) { /* * safe because we can only change our own group * @@ -2202,14 +2239,14 @@ static void task_numa_placement(struct task_struct *p) * nid and priv in a specific region because it * is at the beginning of the numa_faults array. */ - p->numa_group->faults[mem_idx] += diff; - p->numa_group->faults_cpu[mem_idx] += f_diff; - p->numa_group->total_faults += diff; - group_faults += p->numa_group->faults[mem_idx]; + ng->faults[mem_idx] += diff; + ng->faults_cpu[mem_idx] += f_diff; + ng->total_faults += diff; + group_faults += ng->faults[mem_idx]; } } - if (!p->numa_group) { + if (!ng) { if (faults > max_faults) { max_faults = faults; max_nid = nid; @@ -2220,8 +2257,8 @@ static void task_numa_placement(struct task_struct *p) } } - if (p->numa_group) { - numa_group_count_active_nodes(p->numa_group); + if (ng) { + numa_group_count_active_nodes(ng); spin_unlock_irq(group_lock); max_nid = preferred_group_nid(p, max_nid); } @@ -2255,7 +2292,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, int cpu = cpupid_to_cpu(cpupid); int i; - if (unlikely(!p->numa_group)) { + if (unlikely(!deref_curr_numa_group(p))) { unsigned int size = sizeof(struct numa_group) + 4*nr_node_ids*sizeof(unsigned long); @@ -2291,7 +2328,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, if (!grp) goto no_join; - my_grp = p->numa_group; + my_grp = deref_curr_numa_group(p); if (grp == my_grp) goto no_join; @@ -2353,13 +2390,24 @@ no_join: return; } -void task_numa_free(struct task_struct *p) +/* + * Get rid of NUMA staticstics associated with a task (either current or dead). + * If @final is set, the task is dead and has reached refcount zero, so we can + * safely free all relevant data structures. Otherwise, there might be + * concurrent reads from places like load balancing and procfs, and we should + * reset the data back to default state without freeing ->numa_faults. 
+ */ +void task_numa_free(struct task_struct *p, bool final) { - struct numa_group *grp = p->numa_group; - void *numa_faults = p->numa_faults; + /* safe: p either is current or is being freed by current */ + struct numa_group *grp = rcu_dereference_raw(p->numa_group); + unsigned long *numa_faults = p->numa_faults; unsigned long flags; int i; + if (!numa_faults) + return; + if (grp) { spin_lock_irqsave(&grp->lock, flags); for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) @@ -2372,8 +2420,14 @@ void task_numa_free(struct task_struct *p) put_numa_group(grp); } - p->numa_faults = NULL; - kfree(numa_faults); + if (final) { + p->numa_faults = NULL; + kfree(numa_faults); + } else { + p->total_numa_faults = 0; + for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) + numa_faults[i] = 0; + } } /* @@ -2426,7 +2480,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) * actively using should be counted as local. This allows the * scan rate to slow down when a workload has settled down. */ - ng = p->numa_group; + ng = deref_curr_numa_group(p); if (!priv && !local && ng && ng->active_nodes > 1 && numa_is_active_node(cpu_node, ng) && numa_is_active_node(mem_node, ng)) @@ -10444,18 +10498,22 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m) { int node; unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; + struct numa_group *ng; + rcu_read_lock(); + ng = rcu_dereference(p->numa_group); for_each_online_node(node) { if (p->numa_faults) { tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; } - if (p->numa_group) { - gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)], - gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)]; + if (ng) { + gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)], + gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; } print_numa_stats(m, node, tsf, tpf, gsf, gpf); } + rcu_read_unlock(); } #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_SCHED_DEBUG */ diff --git a/kernel/signal.c b/kernel/signal.c index 91b789dd6e72..e667be6907d7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -349,7 +349,7 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. * Group stop states are cleared and the group stop count is consumed if * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group - * stop, the appropriate %SIGNAL_* flags are set. + * stop, the appropriate `SIGNAL_*` flags are set. * * CONTEXT: * Must be called with @task->sighand->siglock held. @@ -1885,6 +1885,7 @@ static void do_notify_pidfd(struct task_struct *task) { struct pid *pid; + WARN_ON(task->exit_state == 0); pid = task_pid(task); wake_up_all(&pid->wait_pidfd); } diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 69ebf3c2f1b5..78af97163147 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) return 0; + /* + * Do not trace a function if it's filtered by set_graph_notrace. + * Make the index of ret stack negative to indicate that it should + * ignore further functions. But it needs its own ret stack entry + * to recover the original index in order to continue tracing after + * returning from the function. 
+ */ if (ftrace_graph_notrace_addr(trace->func)) { trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT); /* @@ -156,16 +163,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) return 0; /* - * Do not trace a function if it's filtered by set_graph_notrace. - * Make the index of ret stack negative to indicate that it should - * ignore further functions. But it needs its own ret stack entry - * to recover the original index in order to continue tracing after - * returning from the function. - */ - if (ftrace_graph_notrace_addr(trace->func)) - return 1; - - /* * Stop here if tracing_threshold is set. We only write function return * events to the ring buffer. */ diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 4fafba1a923b..7fa97a8b5717 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -106,7 +106,6 @@ endchoice config KASAN_STACK_ENABLE bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST - default !(CLANG_VERSION < 90000) depends on KASAN help The LLVM stack address sanitizer has a know problem that @@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE Disabling asan-stack makes it safe to run kernels build with clang-8 with KASAN enabled, though it loses some of the functionality. - This feature is always disabled when compile-testing with clang-8 - or earlier to avoid cluttering the output in stack overflow - warnings, but clang-8 users can still enable it for builds without - CONFIG_COMPILE_TEST. On gcc and later clang versions it is - assumed to always be safe to use and enabled by default. + This feature is always disabled when compile-testing with clang + to avoid cluttering the output in stack overflow warnings, + but clang users can still enable it for builds without + CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe + to use and enabled by default. 
config KASAN_STACK int diff --git a/lib/Makefile b/lib/Makefile index 095601ce371d..29c02a924973 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -279,7 +279,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o obj-$(CONFIG_UBSAN) += ubsan.o UBSAN_SANITIZE_ubsan.o := n -CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +KASAN_SANITIZE_ubsan.o := n +CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN) obj-$(CONFIG_SBITMAP) += sbitmap.o diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 42695bc8d451..0083b5cc646c 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -66,7 +66,7 @@ CFLAGS_vpermxor1.o += $(altivec_flags) CFLAGS_vpermxor2.o += $(altivec_flags) CFLAGS_vpermxor4.o += $(altivec_flags) CFLAGS_vpermxor8.o += $(altivec_flags) -targets += vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o +targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c $(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 83ea6c4e623c..6ca97a63b3d6 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -886,8 +886,11 @@ static int __init test_firmware_init(void) return -ENOMEM; rc = __test_firmware_config_init(); - if (rc) + if (rc) { + kfree(test_fw_config); + pr_err("could not init firmware test config: %d\n", rc); return rc; + } rc = misc_register(&test_fw_misc_device); if (rc) { diff --git a/lib/test_meminit.c b/lib/test_meminit.c index 62d19f270cad..9729f271d150 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -222,7 +222,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor, * Copy the buffer to check that it's not wiped on * free(). */ - buf_copy = kmalloc(size, GFP_KERNEL); + buf_copy = kmalloc(size, GFP_ATOMIC); if (buf_copy) memcpy(buf_copy, buf, size); diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index 2d1c1f241fd9..e630e7ff57f1 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk, ns = vdso_ts->nsec; last = vd->cycle_last; if (unlikely((s64)cycles < 0)) - return clock_gettime_fallback(clk, ts); + return -1; ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); ns >>= vd->shift; @@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk, } static __maybe_unused int -__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts) { const struct vdso_data *vd = __arch_get_vdso_data(); u32 msk; /* Check for negative values or invalid clocks */ if (unlikely((u32) clock >= MAX_CLOCKS)) - goto fallback; + return -1; /* * Convert the clockid to a bitmask and use it to check which @@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) } else if (msk & VDSO_RAW) { return do_hres(&vd[CS_RAW], clock, ts); } + return -1; +} + +static __maybe_unused int +__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + int ret = __cvdso_clock_gettime_common(clock, ts); -fallback: - return clock_gettime_fallback(clock, ts); + if (unlikely(ret)) + return clock_gettime_fallback(clock, ts); + return 0; } static __maybe_unused int @@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res) struct __kernel_timespec ts; int ret; - if (res == NULL) - goto fallback; + ret = __cvdso_clock_gettime_common(clock, &ts); - ret = 
__cvdso_clock_gettime(clock, &ts); +#ifdef VDSO_HAS_32BIT_FALLBACK + if (unlikely(ret)) + return clock_gettime32_fallback(clock, res); +#else + if (unlikely(ret)) + ret = clock_gettime_fallback(clock, &ts); +#endif - if (ret == 0) { + if (likely(!ret)) { res->tv_sec = ts.tv_sec; res->tv_nsec = ts.tv_nsec; } - return ret; - -fallback: - return clock_gettime_fallback(clock, (struct __kernel_timespec *)res); } static __maybe_unused int @@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time) #ifdef VDSO_HAS_CLOCK_GETRES static __maybe_unused -int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) +int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res) { const struct vdso_data *vd = __arch_get_vdso_data(); - u64 ns; + u64 hrtimer_res; u32 msk; - u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res); + u64 ns; /* Check for negative values or invalid clocks */ if (unlikely((u32) clock >= MAX_CLOCKS)) - goto fallback; + return -1; + hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res); /* * Convert the clockid to a bitmask and use it to check which * clocks are handled in the VDSO directly. @@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) */ ns = hrtimer_res; } else { - goto fallback; + return -1; } - if (res) { - res->tv_sec = 0; - res->tv_nsec = ns; - } + res->tv_sec = 0; + res->tv_nsec = ns; return 0; +} + +int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) +{ + int ret = __cvdso_clock_getres_common(clock, res); -fallback: - return clock_getres_fallback(clock, res); + if (unlikely(ret)) + return clock_getres_fallback(clock, res); + return 0; } static __maybe_unused int @@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res) struct __kernel_timespec ts; int ret; - if (res == NULL) - goto fallback; + ret = __cvdso_clock_getres_common(clock, &ts); - ret = __cvdso_clock_getres(clock, &ts); +#ifdef VDSO_HAS_32BIT_FALLBACK + if (unlikely(ret)) + return clock_getres32_fallback(clock, res); +#else + if (unlikely(ret)) + ret = clock_getres_fallback(clock, &ts); +#endif - if (ret == 0) { + if (likely(!ret)) { res->tv_sec = ts.tv_sec; res->tv_nsec = ts.tv_nsec; } - return ret; - -fallback: - return clock_getres_fallback(clock, (struct __kernel_timespec *)res); } #endif /* VDSO_HAS_CLOCK_GETRES */ diff --git a/mm/Makefile b/mm/Makefile index 338e528ad436..d0b295c3b764 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -102,5 +102,6 @@ obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o +obj-$(CONFIG_ZONE_DEVICE) += memremap.o obj-$(CONFIG_HMM_MIRROR) += hmm.o obj-$(CONFIG_MEMFD_CREATE) += memfd.o diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 83a7b614061f..798275a51887 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, * memory corruption is possible and we should stop execution. */ BUG_ON(!trylock_page(page)); - list_del(&page->lru); balloon_page_insert(b_dev_info, page); unlock_page(page); __count_vm_event(BALLOON_INFLATE); @@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, * @b_dev_info: balloon device descriptor where we will insert a new page to * @pages: pages to enqueue - allocated using balloon_page_alloc. 
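The lib/vdso/gettimeofday.c rework above splits each entry point into a *_common() helper that only reports failure and a thin wrapper that picks the appropriate fallback syscall (including the new VDSO_HAS_32BIT_FALLBACK path). A generic sketch of that shape with placeholder names (fast_op_common(), fast_op() and op_fallback() are not real vDSO symbols):

/* Fast path only: return 0 on success, -1 when a syscall is required. */
static int fast_op_common(int arg, long *res)
{
	if (arg < 0)
		return -1;	/* not something the fast path can serve */

	*res = arg * 2;		/* stand-in for the real computation */
	return 0;
}

/* Hypothetical fallback that issues the real system call. */
extern long op_fallback(int arg, long *res);

/* Only the outer wrapper decides which fallback variant to invoke. */
static int fast_op(int arg, long *res)
{
	if (fast_op_common(arg, res))
		return op_fallback(arg, res);

	return 0;
}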
* - * Driver must call it to properly enqueue a balloon pages before definitively - * removing it from the guest system. + * Driver must call this function to properly enqueue balloon pages before + * definitively removing them from the guest system. * * Return: number of pages that were enqueued. */ @@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, spin_lock_irqsave(&b_dev_info->pages_lock, flags); list_for_each_entry_safe(page, tmp, pages, lru) { + list_del(&page->lru); balloon_page_enqueue_one(b_dev_info, page); n_pages++; } @@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue); * @n_req_pages: number of requested pages. * * Driver must call this function to properly de-allocate a previous enlisted - * balloon pages before definetively releasing it back to the guest system. + * balloon pages before definitively releasing it back to the guest system. * This function tries to remove @n_req_pages from the ballooned pages and * return them to the caller in the @pages list. * - * Note that this function may fail to dequeue some pages temporarily empty due - * to compaction isolated pages. + * Note that this function may fail to dequeue some pages even if the balloon + * isn't empty - since the page list can be temporarily empty due to compaction + * of isolated pages. * * Return: number of pages that were added to the @pages list. */ @@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue); /* * balloon_page_alloc - allocates a new page for insertion into the balloon - * page list. + * page list. + * + * Driver must call this function to properly allocate a new balloon page. + * Driver must call balloon_page_enqueue before definitively removing the page + * from the guest system. * - * Driver must call it to properly allocate a new enlisted balloon page. - * Driver must call balloon_page_enqueue before definitively removing it from - * the guest system. This function returns the page address for the recently - * allocated page or NULL in the case we fail to allocate a new page this turn. + * Return: struct page for the allocated page or NULL on allocation failure. */ struct page *balloon_page_alloc(void) { @@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void) EXPORT_SYMBOL_GPL(balloon_page_alloc); /* - * balloon_page_enqueue - allocates a new page and inserts it into the balloon - * page list. - * @b_dev_info: balloon device descriptor where we will insert a new page to + * balloon_page_enqueue - inserts a new page into the balloon page list. + * + * @b_dev_info: balloon device descriptor where we will insert a new page * @page: new page to enqueue - allocated using balloon_page_alloc. * - * Driver must call it to properly enqueue a new allocated balloon page - * before definitively removing it from the guest system. - * This function returns the page address for the recently enqueued page or - * NULL in the case we fail to allocate a new page this turn. + * Drivers must call this function to properly enqueue a new allocated balloon + * page before definitively removing the page from the guest system. + * + * Drivers must not call balloon_page_enqueue on pages that have been pushed to + * a list with balloon_page_push before removing them with balloon_page_pop. To + * enqueue a list of pages, use balloon_page_list_enqueue instead. 
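The documentation being corrected above spells out the intended driver flow: allocate pages with balloon_page_alloc(), stash them on a private list with balloon_page_push(), then hand the whole list to balloon_page_list_enqueue(). A rough usage sketch under those assumptions (inflate_some() and its batch handling are hypothetical, not taken from an in-tree driver):

#include <linux/balloon_compaction.h>
#include <linux/list.h>
#include <linux/mm.h>

static size_t inflate_some(struct balloon_dev_info *b_dev_info, int nr)
{
	LIST_HEAD(pages);
	struct page *page;
	int i;

	for (i = 0; i < nr; i++) {
		page = balloon_page_alloc();
		if (!page)
			break;
		balloon_page_push(&pages, page);
	}

	/* Takes pages_lock and moves every page onto the balloon's list. */
	return balloon_page_list_enqueue(b_dev_info, &pages);
}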
*/ void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, struct page *page) @@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue); /* * balloon_page_dequeue - removes a page from balloon's page list and returns - * the its address to allow the driver release the page. + * its address to allow the driver to release the page. * @b_dev_info: balloon device decriptor where we will grab a page from. * - * Driver must call it to properly de-allocate a previous enlisted balloon page - * before definetively releasing it back to the guest system. - * This function returns the page address for the recently dequeued page or - * NULL in the case we find balloon's page list temporarily empty due to - * compaction isolated pages. + * Driver must call this function to properly dequeue a previously enqueued page + * before definitively releasing it back to the guest system. + * + * Caller must perform its own accounting to ensure that this + * function is called only if some pages are actually enqueued. + * + * Note that this function may fail to dequeue some pages even if there are + * some enqueued pages - since the page list can be temporarily empty due to + * the compaction of isolated pages. + * + * TODO: remove the caller accounting requirements, and allow caller to wait + * until all pages can be dequeued. + * + * Return: struct page for the dequeued page, or NULL if no page was dequeued. */ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) { @@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) if (n_pages != 1) { /* * If we are unable to dequeue a balloon page because the page - * list is empty and there is no isolated pages, then something + * list is empty and there are no isolated pages, then something * went out of track and some balloon pages are lost. - * BUG() here, otherwise the balloon driver may get stuck into + * BUG() here, otherwise the balloon driver may get stuck in * an infinite loop while attempting to release all its pages. */ spin_lock_irqsave(&b_dev_info->pages_lock, flags); @@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping, /* * We can not easily support the no copy case here so ignore it as it - * is unlikely to be use with ballon pages. See include/linux/hmm.h for - * user of the MIGRATE_SYNC_NO_COPY mode. + * is unlikely to be used with balloon pages. See include/linux/hmm.h + * for a user of the MIGRATE_SYNC_NO_COPY mode. */ if (mode == MIGRATE_SYNC_NO_COPY) return -EINVAL; diff --git a/mm/compaction.c b/mm/compaction.c index 9e1b9acb116b..952dc2fb24e5 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -842,13 +842,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* * Periodically drop the lock (if held) regardless of its - * contention, to give chance to IRQs. Abort async compaction - * if contended. + * contention, to give chance to IRQs. Abort completely if + * a fatal signal is pending. 
*/ if (!(low_pfn % SWAP_CLUSTER_MAX) && compact_unlock_should_abort(&pgdat->lru_lock, - flags, &locked, cc)) - break; + flags, &locked, cc)) { + low_pfn = 0; + goto fatal_pending; + } if (!pfn_valid_within(low_pfn)) goto isolate_fail; @@ -1060,6 +1062,7 @@ isolate_abort: trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, nr_scanned, nr_isolated); +fatal_pending: cc->total_migrate_scanned += nr_scanned; if (nr_isolated) count_compact_events(COMPACTISOLATED, nr_isolated); @@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister); * @range: range * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid * permission (for instance asking for write and range is read only), - * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid + * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid * vma or it is illegal to access that range), number of valid pages * in range->pfns[] (from range start address). * @@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range) do { /* If range is no longer valid force retry. */ if (!range->valid) - return -EAGAIN; + return -EBUSY; vma = find_vma(hmm->mm, start); if (vma == NULL || (vma->vm_flags & device_vma)) @@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block) do { /* If range is no longer valid force retry. */ - if (!range->valid) { - up_read(&hmm->mm->mmap_sem); - return -EAGAIN; - } + if (!range->valid) + return -EBUSY; vma = find_vma(hmm->mm, start); if (vma == NULL || (vma->vm_flags & device_vma)) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index dbbd518fb6b3..6e9e8cca663e 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -114,7 +114,7 @@ /* GFP bitmask for kmemleak internal allocations */ #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ __GFP_NORETRY | __GFP_NOMEMALLOC | \ - __GFP_NOWARN | __GFP_NOFAIL) + __GFP_NOWARN) /* scanning area inside a memory block */ struct kmemleak_scan_area { diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2a9bbddb0e55..c73f09913165 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -132,7 +132,6 @@ static void release_memory_resource(struct resource *res) return; release_resource(res); kfree(res); - return; } #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE @@ -979,7 +978,6 @@ static void rollback_node_hotadd(int nid) arch_refresh_nodedata(nid, NULL); free_percpu(pgdat->per_cpu_nodestats); arch_free_nodedata(pgdat); - return; } diff --git a/kernel/memremap.c b/mm/memremap.c index 6ee03a816d67..6ee03a816d67 100644 --- a/kernel/memremap.c +++ b/mm/memremap.c diff --git a/mm/migrate.c b/mm/migrate.c index 8992741f10aa..a42858d8e00b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -767,12 +767,12 @@ recheck_buffers: } bh = bh->b_this_page; } while (bh != head); - spin_unlock(&mapping->private_lock); if (busy) { if (invalidated) { rc = -EAGAIN; goto unlock_buffers; } + spin_unlock(&mapping->private_lock); invalidate_bh_lrus(); invalidated = true; goto recheck_buffers; @@ -805,6 +805,8 @@ recheck_buffers: rc = MIGRATEPAGE_SUCCESS; unlock_buffers: + if (check_refs) + spin_unlock(&mapping->private_lock); bh = head; do { unlock_buffer(bh); @@ -2338,16 +2340,13 @@ next: static void migrate_vma_collect(struct migrate_vma *migrate) { struct mmu_notifier_range range; - struct mm_walk mm_walk; - - mm_walk.pmd_entry = migrate_vma_collect_pmd; - mm_walk.pte_entry = NULL; - mm_walk.pte_hole = migrate_vma_collect_hole; - mm_walk.hugetlb_entry = NULL; - mm_walk.test_walk = NULL; - mm_walk.vma = migrate->vma; - mm_walk.mm = 
migrate->vma->vm_mm; - mm_walk.private = migrate; + struct mm_walk mm_walk = { + .pmd_entry = migrate_vma_collect_pmd, + .pte_hole = migrate_vma_collect_hole, + .vma = migrate->vma, + .mm = migrate->vma->vm_mm, + .private = migrate, + }; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm, migrate->start, diff --git a/mm/slub.c b/mm/slub.c index e6c030e47364..8834563cdb4b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1432,7 +1432,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, void *old_tail = *tail ? *tail : *head; int rsize; - if (slab_want_init_on_free(s)) + if (slab_want_init_on_free(s)) { + void *p = NULL; + do { object = next; next = get_freepointer(s, object); @@ -1445,8 +1447,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, : 0; memset((char *)object + s->inuse, 0, s->size - s->inuse - rsize); - set_freepointer(s, object, next); + set_freepointer(s, object, p); + p = object; } while (object != old_tail); + } /* * Compiler cannot detect this function can be removed if slab_free_hook() diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 4fa8d84599b0..e0fc963acc41 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1259,6 +1259,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) return false; /* + * First make sure the mappings are removed from all page-tables + * before they are freed. + */ + vmalloc_sync_all(); + + /* * TODO: to calculate a flush range without looping. * The list can be up to lazy_max_pages() elements. */ @@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range); /* * Implement a stub for vmalloc_sync_all() if the architecture chose not to * have one. + * + * The purpose of this function is to make sure the vmalloc area + * mappings are identical in all page-tables in the system. */ void __weak vmalloc_sync_all(void) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 44df66a98f2a..dbdc46a84f63 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -699,7 +699,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, unsigned long ret, freed = 0; struct shrinker *shrinker; - if (!mem_cgroup_is_root(memcg)) + /* + * The root memcg might be allocated even though memcg is disabled + * via "cgroup_disable=memory" boot parameter. This could make + * mem_cgroup_is_root() return false, then just run memcg slab + * shrink, but skip global shrink. This may result in premature + * oom. + */ + if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg)) return shrink_slab_memcg(gfp_mask, nid, memcg, priority); if (!down_read_trylock(&shrinker_rwsem)) diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index 154fa558bb90..5040fe43f4b4 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@ -6,7 +6,7 @@ menuconfig NF_TABLES_BRIDGE depends on BRIDGE && NETFILTER && NF_TABLES select NETFILTER_FAMILY_BRIDGE - bool "Ethernet Bridge nf_tables support" + tristate "Ethernet Bridge nf_tables support" if NF_TABLES_BRIDGE @@ -25,6 +25,8 @@ config NF_LOG_BRIDGE tristate "Bridge packet logging" select NF_LOG_COMMON +endif # NF_TABLES_BRIDGE + config NF_CONNTRACK_BRIDGE tristate "IPv4/IPV6 bridge connection tracking support" depends on NF_CONNTRACK @@ -39,8 +41,6 @@ config NF_CONNTRACK_BRIDGE To compile it as a module, choose M here. If unsure, say N. 
-endif # NF_TABLES_BRIDGE - menuconfig BRIDGE_NF_EBTABLES tristate "Ethernet Bridge tables (ebtables) support" depends on BRIDGE && NETFILTER && NETFILTER_XTABLES diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 76f8db3841d7..d63b970784dc 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -165,7 +165,7 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule, } EXPORT_SYMBOL(flow_rule_match_enc_opts); -struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, +struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv)) { @@ -175,7 +175,6 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, if (!block_cb) return ERR_PTR(-ENOMEM); - block_cb->net = net; block_cb->cb = cb; block_cb->cb_ident = cb_ident; block_cb->cb_priv = cb_priv; @@ -194,14 +193,13 @@ void flow_block_cb_free(struct flow_block_cb *block_cb) } EXPORT_SYMBOL(flow_block_cb_free); -struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f, - tc_setup_cb_t *cb, void *cb_ident) +struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block, + flow_setup_cb_t *cb, void *cb_ident) { struct flow_block_cb *block_cb; - list_for_each_entry(block_cb, f->driver_block_list, driver_list) { - if (block_cb->net == f->net && - block_cb->cb == cb && + list_for_each_entry(block_cb, &block->cb_list, list) { + if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) return block_cb; } @@ -228,7 +226,7 @@ unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb) } EXPORT_SYMBOL(flow_block_cb_decref); -bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident, +bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, struct list_head *driver_block_list) { struct flow_block_cb *block_cb; @@ -245,7 +243,8 @@ EXPORT_SYMBOL(flow_block_cb_is_busy); int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_block_list, - tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, + flow_setup_cb_t *cb, + void *cb_ident, void *cb_priv, bool ingress_only) { struct flow_block_cb *block_cb; @@ -261,8 +260,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f, if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, cb, cb_ident, - cb_priv, NULL); + block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); @@ -270,7 +268,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f, list_add_tail(&block_cb->driver_list, driver_block_list); return 0; case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f, cb, cb_ident); + block_cb = flow_block_cb_lookup(f->block, cb, cb_ident); if (!block_cb) return -ENOENT; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 614c38ece104..33f41178afcc 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -951,7 +951,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) { struct flow_block_cb *block_cb; - tc_setup_cb_t *cb; + flow_setup_cb_t *cb; if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) cb = dsa_slave_setup_tc_block_cb_ig; @@ -967,7 +967,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev, if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL); + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); if (IS_ERR(block_cb)) return 
PTR_ERR(block_cb); @@ -975,7 +975,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev, list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list); return 0; case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f, cb, dev); + block_cb = flow_block_cb_lookup(f->block, cb, dev); if (!block_cb) return -ENOENT; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 4d6bf7ac0792..6bdb1ab8af61 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -416,8 +416,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) ctinfo == IP_CT_RELATED_REPLY)) return XT_CONTINUE; - /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, - * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here + /* nf_conntrack_proto_icmp guarantees us that we only have ICMP_ECHO, + * TIMESTAMP, INFO_REQUEST or ICMP_ADDRESS type icmp packets from here * on, which all have an ID field [relevant for hashing]. */ hash = clusterip_hashfn(skb, cipinfo->config); diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 8e7f84ec783d..0e70f3f65f6f 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -36,6 +36,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) opts.options |= XT_SYNPROXY_OPT_ECN; opts.options &= info->options; + opts.mss_encode = opts.mss; + opts.mss = info->mss; if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) synproxy_init_timestamp_cookie(info, &opts); else diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index 59031670b16a..cc23f1ce239c 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? 
skb->mark : 0; flow.flowi4_tos = RT_TOS(iph->tos); flow.flowi4_scope = RT_SCOPE_UNIVERSE; + flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par)); return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; } diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 87b711fd5a44..3e2685c120c7 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -221,11 +221,11 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, int ret; rtp_exp->tuple.dst.u.udp.port = htons(nated_port); - ret = nf_ct_expect_related(rtp_exp); + ret = nf_ct_expect_related(rtp_exp, 0); if (ret == 0) { rtcp_exp->tuple.dst.u.udp.port = htons(nated_port + 1); - ret = nf_ct_expect_related(rtcp_exp); + ret = nf_ct_expect_related(rtcp_exp, 0); if (ret == 0) break; else if (ret == -EBUSY) { @@ -296,7 +296,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, int ret; exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp); + ret = nf_ct_expect_related(exp, 0); if (ret == 0) break; else if (ret != -EBUSY) { @@ -352,7 +352,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, int ret; exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp); + ret = nf_ct_expect_related(exp, 0); if (ret == 0) break; else if (ret != -EBUSY) { @@ -444,7 +444,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, int ret; exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp); + ret = nf_ct_expect_related(exp, 0); if (ret == 0) break; else if (ret != -EBUSY) { @@ -537,7 +537,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, int ret; exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp); + ret = nf_ct_expect_related(exp, 0); if (ret == 0) break; else if (ret != -EBUSY) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4af1f5dae9d3..6e4afc48d7bb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1288,6 +1288,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; int nsize, old_factor; + long limit; int nlen; u8 flags; @@ -1298,8 +1299,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, if (nsize < 0) nsize = 0; - if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf && - tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) { + /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb. + * We need some allowance to not penalize applications setting small + * SO_SNDBUF values. + * Also allow first and last skb in retransmit queue to be split. 
+ */ + limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE); + if (unlikely((sk->sk_wmem_queued >> 1) > limit && + tcp_queue != TCP_FRAG_IN_WRITE_QUEUE && + skb != tcp_rtx_queue_head(sk) && + skb != tcp_rtx_queue_tail(sk))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); return -ENOMEM; } diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index e77ea1ed5edd..5cdb4a69d277 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -36,6 +36,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) opts.options |= XT_SYNPROXY_OPT_ECN; opts.options &= info->options; + opts.mss_encode = opts.mss; + opts.mss = info->mss; if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) synproxy_init_timestamp_cookie(info, &opts); else diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 6bcaf7357183..d800801a5dd2 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, if (rpfilter_addr_linklocal(&iph->saddr)) { lookup_flags |= RT6_LOOKUP_F_IFACE; fl6.flowi6_oif = dev->ifindex; - } else if ((flags & XT_RPFILTER_LOOSE) == 0) + /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */ + } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) || + (flags & XT_RPFILTER_LOOSE) == 0) fl6.flowi6_oif = dev->ifindex; rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags); @@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, goto out; } - if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) + if (rt->rt6i_idev->dev == dev || + l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex || + (flags & XT_RPFILTER_LOOSE)) ret = true; out: ip6_rt_put(rt); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 76cc9e967fa6..4d458067d80d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -936,8 +936,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, err = ieee80211_set_probe_resp(sdata, params->probe_resp, params->probe_resp_len, csa); - if (err < 0) + if (err < 0) { + kfree(new); return err; + } if (err == 0) changed |= BSS_CHANGED_AP_PROBE_RESP; @@ -949,8 +951,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, params->civicloc, params->civicloc_len); - if (err < 0) + if (err < 0) { + kfree(new); return err; + } changed |= BSS_CHANGED_FTM_RESPONDER; } diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index acd4afb4944b..c9a8a2433e8a 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local, if (!check_sdata_in_driver(sdata)) return -EIO; - if (WARN_ONCE(params->cw_min == 0 || - params->cw_min > params->cw_max, - "%s: invalid CW_min/CW_max: %d/%d\n", - sdata->name, params->cw_min, params->cw_max)) + if (params->cw_min == 0 || params->cw_min > params->cw_max) { + /* + * If we can't configure hardware anyway, don't warn. We may + * never have initialized the CW parameters. 
+ */ + WARN_ONCE(local->ops->conf_tx, + "%s: invalid CW_min/CW_max: %d/%d\n", + sdata->name, params->cw_min, params->cw_max); return -EINVAL; + } trace_drv_conf_tx(local, sdata, ac, params); if (local->ops->conf_tx) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 32a45c03786e..0d65f4d39494 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -223,8 +223,6 @@ config NF_CONNTRACK_FTP of Network Address Translation on them. This is FTP support on Layer 3 independent connection tracking. - Layer 3 independent connection tracking is experimental scheme - which generalize ip_conntrack to support other layer 3 protocols. To compile it as a module, choose M here. If unsure, say N. @@ -338,7 +336,7 @@ config NF_CONNTRACK_SIP help SIP is an application-layer control protocol that can establish, modify, and terminate multimedia sessions (conferences) such as - Internet telephony calls. With the ip_conntrack_sip and + Internet telephony calls. With the nf_conntrack_sip and the nf_nat_sip modules you can support the protocol on a connection tracking/NATing firewall. @@ -1313,7 +1311,7 @@ config NETFILTER_XT_MATCH_HELPER depends on NETFILTER_ADVANCED help Helper matching allows you to match packets in dynamic connections - tracked by a conntrack-helper, ie. ip_conntrack_ftp + tracked by a conntrack-helper, ie. nf_conntrack_ftp To compile it as a module, choose M here. If unsure, say Y. diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index 403541996952..08adcb222986 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c @@ -231,7 +231,7 @@ void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n", __func__, ct, ARG_TUPLE(&exp->tuple)); - nf_ct_expect_related(exp); + nf_ct_expect_related(exp, 0); nf_ct_expect_put(exp); } EXPORT_SYMBOL(ip_vs_nfct_expect_related); diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 42ee659d0d1e..d011d2eb0848 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c @@ -159,7 +159,7 @@ static int amanda_help(struct sk_buff *skb, if (nf_nat_amanda && ct->status & IPS_NAT_MASK) ret = nf_nat_amanda(skb, ctinfo, protoff, off - dataoff, len, exp); - else if (nf_ct_expect_related(exp) != 0) { + else if (nf_ct_expect_related(exp, 0) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; } diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c index 921a7b95be68..1ba6becc3079 100644 --- a/net/netfilter/nf_conntrack_broadcast.c +++ b/net/netfilter/nf_conntrack_broadcast.c @@ -68,7 +68,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb, exp->class = NF_CT_EXPECT_CLASS_DEFAULT; exp->helper = NULL; - nf_ct_expect_related(exp); + nf_ct_expect_related(exp, 0); nf_ct_expect_put(exp); nf_ct_refresh(ct, skb, timeout * HZ); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index bdfeacee0817..a542761e90d1 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1817,9 +1817,7 @@ EXPORT_SYMBOL_GPL(nf_ct_kill_acct); #include <linux/netfilter/nfnetlink_conntrack.h> #include <linux/mutex.h> -/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be - * in ip_conntrack_core, since we don't want the protocols to autoload - * or depend on ctnetlink */ +/* Generic function for tcp/udp/sctp/dccp and alike. 
*/ int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index ffd1f4906c4f..65364de915d1 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -249,13 +249,22 @@ static inline int expect_clash(const struct nf_conntrack_expect *a, static inline int expect_matches(const struct nf_conntrack_expect *a, const struct nf_conntrack_expect *b) { - return a->master == b->master && - nf_ct_tuple_equal(&a->tuple, &b->tuple) && + return nf_ct_tuple_equal(&a->tuple, &b->tuple) && nf_ct_tuple_mask_equal(&a->mask, &b->mask) && net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master)); } +static bool master_matches(const struct nf_conntrack_expect *a, + const struct nf_conntrack_expect *b, + unsigned int flags) +{ + if (flags & NF_CT_EXP_F_SKIP_MASTER) + return true; + + return a->master == b->master; +} + /* Generally a bad idea to call this: could have matched already. */ void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) { @@ -399,7 +408,8 @@ static void evict_oldest_expect(struct nf_conn *master, nf_ct_remove_expect(last); } -static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) +static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect, + unsigned int flags) { const struct nf_conntrack_expect_policy *p; struct nf_conntrack_expect *i; @@ -417,8 +427,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) } h = nf_ct_expect_dst_hash(net, &expect->tuple); hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { - if (expect_matches(i, expect)) { - if (i->class != expect->class) + if (master_matches(i, expect, flags) && + expect_matches(i, expect)) { + if (i->class != expect->class || + i->master != expect->master) return -EALREADY; if (nf_ct_remove_expect(i)) @@ -453,12 +465,12 @@ out: } int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, - u32 portid, int report) + u32 portid, int report, unsigned int flags) { int ret; spin_lock_bh(&nf_conntrack_expect_lock); - ret = __nf_ct_expect_check(expect); + ret = __nf_ct_expect_check(expect, flags); if (ret < 0) goto out; diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 8c6c11bab5b6..0ecb3e289ef2 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -525,7 +525,7 @@ skip_nl_seq: protoff, matchoff, matchlen, exp); else { /* Can't expect this? Best to drop packet now. */ - if (nf_ct_expect_related(exp) != 0) { + if (nf_ct_expect_related(exp, 0) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; } else diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 8f6ba8162f0b..573cb4481481 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 - * conntrack/NAT module. + * BER and PER decoding library for H.323 conntrack/NAT module. * * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> * - * See ip_conntrack_helper_h323_asn1.h for details. + * See nf_conntrack_helper_h323_asn1.h for details. 
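The nf_conntrack_expect.c hunk above threads a flags word into the clash check so a caller can ask for the expectation's master to be ignored (NF_CT_EXP_F_SKIP_MASTER); every caller updated in this diff simply passes 0. A sketch of how a helper could opt in, assuming only what the hunk shows (register_expectation() itself is made up):

#include <linux/types.h>
#include <net/netfilter/nf_conntrack_expect.h>

static int register_expectation(struct nf_conntrack_expect *exp,
				bool skip_master)
{
	/* 0 keeps the old behaviour; the flag relaxes the master check. */
	unsigned int flags = skip_master ? NF_CT_EXP_F_SKIP_MASTER : 0;

	return nf_ct_expect_related(exp, flags);
}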
*/ #ifdef __KERNEL__ diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 6497e5fc0871..8ba037b76ad3 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -305,8 +305,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, taddr, port, rtp_port, rtp_exp, rtcp_exp); } else { /* Conntrack only */ - if (nf_ct_expect_related(rtp_exp) == 0) { - if (nf_ct_expect_related(rtcp_exp) == 0) { + if (nf_ct_expect_related(rtp_exp, 0) == 0) { + if (nf_ct_expect_related(rtcp_exp, 0) == 0) { pr_debug("nf_ct_h323: expect RTP "); nf_ct_dump_tuple(&rtp_exp->tuple); pr_debug("nf_ct_h323: expect RTCP "); @@ -364,7 +364,7 @@ static int expect_t120(struct sk_buff *skb, ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr, port, exp); } else { /* Conntrack only */ - if (nf_ct_expect_related(exp) == 0) { + if (nf_ct_expect_related(exp, 0) == 0) { pr_debug("nf_ct_h323: expect T.120 "); nf_ct_dump_tuple(&exp->tuple); } else @@ -701,7 +701,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr, port, exp); } else { /* Conntrack only */ - if (nf_ct_expect_related(exp) == 0) { + if (nf_ct_expect_related(exp, 0) == 0) { pr_debug("nf_ct_q931: expect H.245 "); nf_ct_dump_tuple(&exp->tuple); } else @@ -825,7 +825,7 @@ static int expect_callforwarding(struct sk_buff *skb, protoff, data, dataoff, taddr, port, exp); } else { /* Conntrack only */ - if (nf_ct_expect_related(exp) == 0) { + if (nf_ct_expect_related(exp, 0) == 0) { pr_debug("nf_ct_q931: expect Call Forwarding "); nf_ct_dump_tuple(&exp->tuple); } else @@ -1284,7 +1284,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct, ret = nat_q931(skb, ct, ctinfo, protoff, data, taddr, i, port, exp); } else { /* Conntrack only */ - if (nf_ct_expect_related(exp) == 0) { + if (nf_ct_expect_related(exp, 0) == 0) { pr_debug("nf_ct_ras: expect Q.931 "); nf_ct_dump_tuple(&exp->tuple); @@ -1349,7 +1349,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct, IPPROTO_UDP, NULL, &port); exp->helper = nf_conntrack_helper_ras; - if (nf_ct_expect_related(exp) == 0) { + if (nf_ct_expect_related(exp, 0) == 0) { pr_debug("nf_ct_ras: expect RAS "); nf_ct_dump_tuple(&exp->tuple); } else @@ -1561,7 +1561,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct, exp->flags = NF_CT_EXPECT_PERMANENT; exp->helper = nf_conntrack_helper_q931; - if (nf_ct_expect_related(exp) == 0) { + if (nf_ct_expect_related(exp, 0) == 0) { pr_debug("nf_ct_ras: expect Q.931 "); nf_ct_dump_tuple(&exp->tuple); } else @@ -1615,7 +1615,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct, exp->flags = NF_CT_EXPECT_PERMANENT; exp->helper = nf_conntrack_helper_q931; - if (nf_ct_expect_related(exp) == 0) { + if (nf_ct_expect_related(exp, 0) == 0) { pr_debug("nf_ct_ras: expect Q.931 "); nf_ct_dump_tuple(&exp->tuple); } else diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 7ac156f1f3bc..e40988a2f22f 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -213,7 +213,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, addr_beg_p - ib_ptr, addr_end_p - addr_beg_p, exp); - else if (nf_ct_expect_related(exp) != 0) { + else if (nf_ct_expect_related(exp, 0) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; diff --git 
a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 1b77444d5b52..6aa01eb6fe99 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2616,7 +2616,7 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, if (IS_ERR(exp)) return PTR_ERR(exp); - err = nf_ct_expect_related_report(exp, portid, report); + err = nf_ct_expect_related_report(exp, portid, report, 0); nf_ct_expect_put(exp); return err; } @@ -3367,7 +3367,7 @@ ctnetlink_create_expect(struct net *net, goto err_rcu; } - err = nf_ct_expect_related_report(exp, portid, report); + err = nf_ct_expect_related_report(exp, portid, report, 0); nf_ct_expect_put(exp); err_rcu: rcu_read_unlock(); diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index b22042ad0fca..a971183f11af 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -234,9 +234,9 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid) nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) nf_nat_pptp_exp_gre(exp_orig, exp_reply); - if (nf_ct_expect_related(exp_orig) != 0) + if (nf_ct_expect_related(exp_orig, 0) != 0) goto out_put_both; - if (nf_ct_expect_related(exp_reply) != 0) + if (nf_ct_expect_related(exp_reply, 0) != 0) goto out_unexpect_orig; /* Add GRE keymap entries */ diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index c2eb365f1723..5b05487a60d2 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * ip_conntrack_proto_gre.c - Version 3.0 - * * Connection tracking protocol helper module for GRE. 
* * GRE is a generic encapsulation protocol, which is generally not very diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c index dd53e2b20f6b..097deba7441a 100644 --- a/net/netfilter/nf_conntrack_proto_icmp.c +++ b/net/netfilter/nf_conntrack_proto_icmp.c @@ -215,7 +215,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, return -NF_ACCEPT; } - /* See ip_conntrack_proto_tcp.c */ + /* See nf_conntrack_proto_tcp.c */ if (state->net->ct.sysctl_checksum && state->hook == NF_INET_PRE_ROUTING && nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) { diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index d5fdfa00d683..85c1f8c213b0 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct, struct ip_ct_tcp_state *receiver = &state->seen[!dir]; const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; __u32 seq, ack, sack, end, win, swin; + u16 win_raw; s32 receiver_offset; bool res, in_recv_win; @@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct, */ seq = ntohl(tcph->seq); ack = sack = ntohl(tcph->ack_seq); - win = ntohs(tcph->window); + win_raw = ntohs(tcph->window); + win = win_raw; end = segment_seq_plus_len(seq, skb->len, dataoff, tcph); if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) @@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct, && state->last_seq == seq && state->last_ack == ack && state->last_end == end - && state->last_win == win) + && state->last_win == win_raw) state->retrans++; else { state->last_dir = dir; state->last_seq = seq; state->last_ack = ack; state->last_end = end; - state->last_win = win; + state->last_win = win_raw; state->retrans = 0; } } diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c index 81448c3db661..1aebd6569d4e 100644 --- a/net/netfilter/nf_conntrack_sane.c +++ b/net/netfilter/nf_conntrack_sane.c @@ -153,7 +153,7 @@ static int help(struct sk_buff *skb, nf_ct_dump_tuple(&exp->tuple); /* Can't expect this? Best to drop packet now. */ - if (nf_ct_expect_related(exp) != 0) { + if (nf_ct_expect_related(exp, 0) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; } diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 107251731809..b83dc9bf0a5d 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -977,11 +977,15 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff, /* -EALREADY handling works around end-points that send * SDP messages with identical port but different media type, * we pretend expectation was set up. + * It also works in the case that SDP messages are sent with + * identical expect tuples but for different master conntracks. 
*/ - int errp = nf_ct_expect_related(rtp_exp); + int errp = nf_ct_expect_related(rtp_exp, + NF_CT_EXP_F_SKIP_MASTER); if (errp == 0 || errp == -EALREADY) { - int errcp = nf_ct_expect_related(rtcp_exp); + int errcp = nf_ct_expect_related(rtcp_exp, + NF_CT_EXP_F_SKIP_MASTER); if (errcp == 0 || errcp == -EALREADY) ret = NF_ACCEPT; @@ -1296,7 +1300,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff, ret = hooks->expect(skb, protoff, dataoff, dptr, datalen, exp, matchoff, matchlen); else { - if (nf_ct_expect_related(exp) != 0) { + if (nf_ct_expect_related(exp, 0) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; } else diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c index df6d6d61bd58..80ee53f29f68 100644 --- a/net/netfilter/nf_conntrack_tftp.c +++ b/net/netfilter/nf_conntrack_tftp.c @@ -78,7 +78,7 @@ static int tftp_help(struct sk_buff *skb, nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); if (nf_nat_tftp && ct->status & IPS_NAT_MASK) ret = nf_nat_tftp(skb, ctinfo, exp); - else if (nf_ct_expect_related(exp) != 0) { + else if (nf_ct_expect_related(exp, 0) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; } diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c index a352604d6186..3bc7e0854efe 100644 --- a/net/netfilter/nf_nat_amanda.c +++ b/net/netfilter/nf_nat_amanda.c @@ -48,7 +48,7 @@ static unsigned int help(struct sk_buff *skb, int res; exp->tuple.dst.u.tcp.port = htons(port); - res = nf_ct_expect_related(exp); + res = nf_ct_expect_related(exp, 0); if (res == 0) break; else if (res != -EBUSY) { diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 9ab410455992..3f6023ed4966 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -519,7 +519,7 @@ another_round: * and NF_INET_LOCAL_OUT, we change the destination to map into the * range. It might not be possible to get a unique tuple, but we try. * At worst (or if we race), we will end up with a final duplicate in - * __ip_conntrack_confirm and drop the packet. */ + * __nf_conntrack_confirm and drop the packet. 
*/ static void get_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig_tuple, diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c index d48484a9d52d..aace6768a64e 100644 --- a/net/netfilter/nf_nat_ftp.c +++ b/net/netfilter/nf_nat_ftp.c @@ -91,7 +91,7 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb, int ret; exp->tuple.dst.u.tcp.port = htons(port); - ret = nf_ct_expect_related(exp); + ret = nf_ct_expect_related(exp, 0); if (ret == 0) break; else if (ret != -EBUSY) { diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c index dfb7ef8845bd..c691ab8d234c 100644 --- a/net/netfilter/nf_nat_irc.c +++ b/net/netfilter/nf_nat_irc.c @@ -53,7 +53,7 @@ static unsigned int help(struct sk_buff *skb, int ret; exp->tuple.dst.u.tcp.port = htons(port); - ret = nf_ct_expect_related(exp); + ret = nf_ct_expect_related(exp, 0); if (ret == 0) break; else if (ret != -EBUSY) { diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c index e338d91980d8..f0a735e86851 100644 --- a/net/netfilter/nf_nat_sip.c +++ b/net/netfilter/nf_nat_sip.c @@ -414,7 +414,7 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff, int ret; exp->tuple.dst.u.udp.port = htons(port); - ret = nf_ct_expect_related(exp); + ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER); if (ret == 0) break; else if (ret != -EBUSY) { @@ -607,7 +607,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff, int ret; rtp_exp->tuple.dst.u.udp.port = htons(port); - ret = nf_ct_expect_related(rtp_exp); + ret = nf_ct_expect_related(rtp_exp, + NF_CT_EXP_F_SKIP_MASTER); if (ret == -EBUSY) continue; else if (ret < 0) { @@ -615,7 +616,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff, break; } rtcp_exp->tuple.dst.u.udp.port = htons(port + 1); - ret = nf_ct_expect_related(rtcp_exp); + ret = nf_ct_expect_related(rtcp_exp, + NF_CT_EXP_F_SKIP_MASTER); if (ret == 0) break; else if (ret == -EBUSY) { diff --git a/net/netfilter/nf_nat_tftp.c b/net/netfilter/nf_nat_tftp.c index 833a11f68031..1a591132d6eb 100644 --- a/net/netfilter/nf_nat_tftp.c +++ b/net/netfilter/nf_nat_tftp.c @@ -30,7 +30,7 @@ static unsigned int help(struct sk_buff *skb, = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; - if (nf_ct_expect_related(exp) != 0) { + if (nf_ct_expect_related(exp, 0) != 0) { nf_ct_helper_log(skb, exp->master, "cannot add expectation"); return NF_DROP; } diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index b101f187eda8..c769462a839e 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -470,7 +470,7 @@ synproxy_send_client_synack(struct net *net, struct iphdr *iph, *niph; struct tcphdr *nth; unsigned int tcp_hdr_size; - u16 mss = opts->mss; + u16 mss = opts->mss_encode; iph = ip_hdr(skb); @@ -687,7 +687,7 @@ ipv4_synproxy_hook(void *priv, struct sk_buff *skb, state = &ct->proto.tcp; switch (state->state) { case TCP_CONNTRACK_CLOSE: - if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { + if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq) + 1); break; @@ -884,7 +884,7 @@ synproxy_send_client_synack_ipv6(struct net *net, struct ipv6hdr *iph, *niph; struct tcphdr *nth; unsigned int tcp_hdr_size; - u16 mss = opts->mss; + u16 mss = opts->mss_encode; iph = ipv6_hdr(skb); @@ -1111,7 +1111,7 @@ 
ipv6_synproxy_hook(void *priv, struct sk_buff *skb, state = &ct->proto.tcp; switch (state->state) { case TCP_CONNTRACK_CLOSE: - if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { + if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq) + 1); break; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ed17a7c29b86..605a7cfe7ca7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1662,7 +1662,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, chain->flags |= NFT_BASE_CHAIN | flags; basechain->policy = NF_ACCEPT; - INIT_LIST_HEAD(&basechain->cb_list); + flow_block_init(&basechain->flow_block); } else { chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (chain == NULL) @@ -1900,6 +1900,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, if (nla[NFTA_CHAIN_FLAGS]) flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS])); + else if (chain) + flags = chain->flags; nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 2c3302845f67..64f5fd5f240e 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -116,7 +116,7 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain, struct flow_block_cb *block_cb; int err; - list_for_each_entry(block_cb, &basechain->cb_list, list) { + list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) { err = block_cb->cb(type, type_data, block_cb->cb_priv); if (err < 0) return err; @@ -154,7 +154,7 @@ static int nft_flow_offload_rule(struct nft_trans *trans, static int nft_flow_offload_bind(struct flow_block_offload *bo, struct nft_base_chain *basechain) { - list_splice(&bo->cb_list, &basechain->cb_list); + list_splice(&bo->cb_list, &basechain->flow_block.cb_list); return 0; } @@ -198,6 +198,7 @@ static int nft_flow_offload_chain(struct nft_trans *trans, return -EOPNOTSUPP; bo.command = cmd; + bo.block = &basechain->flow_block; bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; bo.extack = &extack; INIT_LIST_HEAD(&bo.cb_list); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 92077d459109..4abbb452cf6c 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group) ss = nfnetlink_get_subsys(type << 8); rcu_read_unlock(); if (!ss) - request_module("nfnetlink-subsys-%d", type); + request_module_nowait("nfnetlink-subsys-%d", type); return 0; } #endif diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c index 3fd540b2c6ba..b5d5d071d765 100644 --- a/net/netfilter/nft_chain_filter.c +++ b/net/netfilter/nft_chain_filter.c @@ -193,7 +193,7 @@ static inline void nft_chain_filter_inet_init(void) {} static inline void nft_chain_filter_inet_fini(void) {} #endif /* CONFIG_NF_TABLES_IPV6 */ -#ifdef CONFIG_NF_TABLES_BRIDGE +#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) static unsigned int nft_do_chain_bridge(void *priv, struct sk_buff *skb, diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c index 2f89bde3c61c..ff9ac8ae0031 100644 --- a/net/netfilter/nft_chain_nat.c +++ b/net/netfilter/nft_chain_nat.c @@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat"); #ifdef CONFIG_NF_TABLES_IPV6 MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat"); #endif +#ifdef CONFIG_NF_TABLES_INET +MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */ 
+#endif diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 827ab6196df9..46ca8bcca1bd 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -1252,7 +1252,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj, priv->l4proto, NULL, &priv->dport); exp->timeout.expires = jiffies + priv->timeout * HZ; - if (nf_ct_expect_related(exp) != 0) + if (nf_ct_expect_related(exp, 0) != 0) regs->verdict.code = NF_DROP; } diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index fe93e731dc7f..b836d550b919 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx, priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); - if (priv->modulus <= 1) + if (priv->modulus < 1) return -ERANGE; if (priv->offset + priv->modulus - 1 < priv->offset) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 76866f77e343..f1b1d948c07b 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -546,7 +546,7 @@ nft_meta_select_ops(const struct nft_ctx *ctx, if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG]) return ERR_PTR(-EINVAL); -#ifdef CONFIG_NF_TABLES_BRIDGE +#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) && IS_MODULE(CONFIG_NFT_BRIDGE_META) if (ctx->family == NFPROTO_BRIDGE) return ERR_PTR(-EAGAIN); #endif diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 8487eeff5c0e..43eeb1f609f1 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c @@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>"); -MODULE_ALIAS_NFT_EXPR("nat"); +MODULE_ALIAS_NFT_EXPR("redir"); diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c index 80060ade8a5b..928e661d1517 100644 --- a/net/netfilter/nft_synproxy.c +++ b/net/netfilter/nft_synproxy.c @@ -31,6 +31,8 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts, opts->options |= NF_SYNPROXY_OPT_ECN; opts->options &= priv->info.options; + opts->mss_encode = opts->mss; + opts->mss = info->mss; if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP) synproxy_init_timestamp_cookie(info, opts); else diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index dca3b1e2acf0..bc89e16e0505 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies) void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, const struct sk_buff *skb) { - struct flow_stats *stats; + struct sw_flow_stats *stats; unsigned int cpu = smp_processor_id(); int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); @@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, if (likely(flow->stats_last_writer != -1) && likely(!rcu_access_pointer(flow->stats[cpu]))) { /* Try to allocate CPU-specific stats. 
*/ - struct flow_stats *new_stats; + struct sw_flow_stats *new_stats; new_stats = kmem_cache_alloc_node(flow_stats_cache, @@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow, /* We open code this to make sure cpu 0 is always considered */ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { - struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); + struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); if (stats) { /* Local CPU may write on non-local stats, so we must @@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow) /* We open code this to make sure cpu 0 is always considered */ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { - struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]); + struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]); if (stats) { spin_lock_bh(&stats->lock); diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 3e2cc2202d66..a5506e2d4b7a 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -194,7 +194,7 @@ struct sw_flow_actions { struct nlattr actions[]; }; -struct flow_stats { +struct sw_flow_stats { u64 packet_count; /* Number of packets matched. */ u64 byte_count; /* Number of bytes matched. */ unsigned long used; /* Last used time (in jiffies). */ @@ -216,7 +216,7 @@ struct sw_flow { struct cpumask cpu_used_mask; struct sw_flow_mask *mask; struct sw_flow_actions __rcu *sf_acts; - struct flow_stats __rcu *stats[]; /* One for each CPU. First one + struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one * is allocated at flow creation time, * the rest are allocated on demand * while holding the 'stats[0].lock'. diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index 988fd8a94e43..cf3582c5ed70 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, struct sw_flow *ovs_flow_alloc(void) { struct sw_flow *flow; - struct flow_stats *stats; + struct sw_flow_stats *stats; flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); if (!flow) @@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow) for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) if (flow->stats[cpu]) kmem_cache_free(flow_stats_cache, - (struct flow_stats __force *)flow->stats[cpu]); + (struct sw_flow_stats __force *)flow->stats[cpu]); kmem_cache_free(flow_cache, flow); } @@ -712,13 +712,13 @@ int ovs_flow_init(void) flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) + (nr_cpu_ids - * sizeof(struct flow_stats *)), + * sizeof(struct sw_flow_stats *)), 0, 0, NULL); if (flow_cache == NULL) return -ENOMEM; flow_stats_cache - = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats), + = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats), 0, SLAB_HWCACHE_ALIGN, NULL); if (flow_stats_cache == NULL) { kmem_cache_destroy(flow_cache); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index d144233423c5..efd3cfb80a2a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -691,6 +691,8 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev, if (!indr_dev->block) return; + bo.block = &indr_dev->block->flow_block; + indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, &bo); tcf_block_setup(indr_dev->block, &bo); @@ -775,6 +777,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct 
net_device *dev, .command = command, .binder_type = ei->binder_type, .net = dev_net(dev), + .block = &block->flow_block, .block_shared = tcf_block_shared(block), .extack = extack, }; @@ -810,6 +813,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block, bo.net = dev_net(dev); bo.command = command; bo.binder_type = ei->binder_type; + bo.block = &block->flow_block; bo.block_shared = tcf_block_shared(block); bo.extack = extack; INIT_LIST_HEAD(&bo.cb_list); @@ -987,8 +991,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, return ERR_PTR(-ENOMEM); } mutex_init(&block->lock); + flow_block_init(&block->flow_block); INIT_LIST_HEAD(&block->chain_list); - INIT_LIST_HEAD(&block->cb_list); INIT_LIST_HEAD(&block->owner_list); INIT_LIST_HEAD(&block->chain0.filter_chain_list); @@ -1514,7 +1518,7 @@ void tcf_block_put(struct tcf_block *block) EXPORT_SYMBOL(tcf_block_put); static int -tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb, +tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, void *cb_priv, bool add, bool offload_in_use, struct netlink_ext_ack *extack) { @@ -1570,7 +1574,7 @@ static int tcf_block_bind(struct tcf_block *block, i++; } - list_splice(&bo->cb_list, &block->cb_list); + list_splice(&bo->cb_list, &block->flow_block.cb_list); return 0; @@ -2152,7 +2156,9 @@ replay: tfilter_notify(net, skb, n, tp, block, q, parent, fh, RTM_NEWTFILTER, false, rtnl_held); tfilter_put(tp, fh); - q->flags &= ~TCQ_F_CAN_BYPASS; + /* q pointer is NULL for shared blocks */ + if (q) + q->flags &= ~TCQ_F_CAN_BYPASS; } errout: @@ -3156,7 +3162,7 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, if (block->nooffloaddevcnt && err_stop) return -EOPNOTSUPP; - list_for_each_entry(block_cb, &block->cb_list, list) { + list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { err = block_cb->cb(type, type_data, block_cb->cb_priv); if (err) { if (err_stop) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 691f71830134..3f7a9c02b70c 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -651,7 +651,7 @@ skip: } } -static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, +static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct cls_bpf_head *head = rtnl_dereference(tp->root); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 38d6e85693fc..054123742e32 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1800,7 +1800,7 @@ fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) return NULL; } -static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, +static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct tcf_block *block = tp->chain->block; diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index a30d2f8feb32..455ea2793f9b 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -282,7 +282,7 @@ skip: arg->count++; } -static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, +static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct cls_mall_head *head = rtnl_dereference(tp->root); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index be9e46c77e8b..8614088edd1b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c 
@@ -1152,7 +1152,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg, } static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, - bool add, tc_setup_cb_t *cb, void *cb_priv, + bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct tc_cls_u32_offload cls_u32 = {}; @@ -1172,7 +1172,7 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, } static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, - bool add, tc_setup_cb_t *cb, void *cb_priv, + bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); @@ -1213,7 +1213,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, return 0; } -static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, +static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct tc_u_common *tp_c = tp->data; diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index f345662890a6..ca8ac96d22a9 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -476,7 +476,7 @@ static void tipc_topsrv_accept(struct work_struct *work) } } -/* tipc_toprsv_listener_data_ready - interrupt callback with connection request +/* tipc_topsrv_listener_data_ready - interrupt callback with connection request * The queued job is launched into tipc_topsrv_accept() */ static void tipc_topsrv_listener_data_ready(struct sock *sk) diff --git a/samples/vfio-mdev/mdpy-defs.h b/samples/vfio-mdev/mdpy-defs.h index 96b3b1b49d34..eb26421b6429 100644 --- a/samples/vfio-mdev/mdpy-defs.h +++ b/samples/vfio-mdev/mdpy-defs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Simple pci display device. 
* diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 77c742fa4fb1..4b0432e095ae 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -190,9 +190,6 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\ # printing commands cmd = @set -e; $(echo-cmd) $(cmd_$(1)) -# Add $(obj)/ for paths that are not absolute -objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o))) - ### # if_changed - execute command if any prerequisite is newer than # target, or command line has changed diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include index 8a5c4d645eb1..4bbf4fc163a2 100644 --- a/scripts/Kconfig.include +++ b/scripts/Kconfig.include @@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y) # $(cc-option,<flag>) # Return y if the compiler supports <flag>, n otherwise -cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null) +cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null) # $(ld-option,<flag>) # Return y if the linker supports <flag>, n otherwise diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 5241d0751eb0..41c50f9461e5 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -45,7 +45,6 @@ subdir-ym := $(sort $(subdir-y) $(subdir-m)) multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m)))) multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))), $(m)))) multi-used := $(multi-used-y) $(multi-used-m) -single-used-m := $(sort $(filter-out $(multi-used-m),$(obj-m))) # $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to # tell kbuild to descend @@ -91,7 +90,6 @@ lib-y := $(addprefix $(obj)/,$(lib-y)) subdir-obj-y := $(addprefix $(obj)/,$(subdir-obj-y)) real-obj-y := $(addprefix $(obj)/,$(real-obj-y)) real-obj-m := $(addprefix $(obj)/,$(real-obj-m)) -single-used-m := $(addprefix $(obj)/,$(single-used-m)) multi-used-m := $(addprefix $(obj)/,$(multi-used-m)) subdir-ym := $(addprefix $(obj)/,$(subdir-ym)) diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 6b19c1a4eae5..92ed02d7cd5e 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -38,12 +38,39 @@ # symbols in the final module linking stage # KBUILD_MODPOST_NOFINAL can be set to skip the final link of modules. 
# This is solely useful to speed up test compiles -PHONY := _modpost -_modpost: __modpost + +PHONY := __modpost +__modpost: include include/config/auto.conf include scripts/Kbuild.include +kernelsymfile := $(objtree)/Module.symvers +modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers + +MODPOST = scripts/mod/modpost \ + $(if $(CONFIG_MODVERSIONS),-m) \ + $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \ + $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \ + $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \ + $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \ + $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ + $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ + $(if $(KBUILD_MODPOST_WARN),-w) + +ifdef MODPOST_VMLINUX + +__modpost: vmlinux.o + +quiet_cmd_modpost = MODPOST $@ + cmd_modpost = $(MODPOST) $@ + +PHONY += vmlinux.o +vmlinux.o: + $(call cmd,modpost) + +else + # When building external modules load the Kbuild file to retrieve EXTRA_SYMBOLS info ifneq ($(KBUILD_EXTMOD),) @@ -58,50 +85,27 @@ endif include scripts/Makefile.lib -kernelsymfile := $(objtree)/Module.symvers -modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers - modorder := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order -# Step 1), find all modules listed in modules.order -ifdef CONFIG_MODULES +# find all modules listed in modules.order modules := $(sort $(shell cat $(modorder))) -endif # Stop after building .o files if NOFINAL is set. Makes compile tests quicker -_modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules)) - -# Step 2), invoke modpost -# Includes step 3,4 -modpost = scripts/mod/modpost \ - $(if $(CONFIG_MODVERSIONS),-m) \ - $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \ - $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \ - $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \ - $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \ - $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ - $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ - $(if $(KBUILD_MODPOST_WARN),-w) - -MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS))) +__modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules)) + @: -# We can go over command line length here, so be careful. -quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules - cmd_modpost = sed 's/ko$$/o/' $(modorder) | $(modpost) $(MODPOST_OPT) -s -T - +MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux) -PHONY += __modpost -__modpost: $(modules:.ko=.o) FORCE - $(call cmd,modpost) $(wildcard vmlinux) - -quiet_cmd_kernel-mod = MODPOST $@ - cmd_kernel-mod = $(modpost) $@ +# We can go over command line length here, so be careful. +quiet_cmd_modpost = MODPOST $(words $(modules)) modules + cmd_modpost = sed 's/ko$$/o/' $(modorder) | $(MODPOST) -vmlinux.o: FORCE - $(call cmd,kernel-mod) +PHONY += modules-modpost +modules-modpost: + $(call cmd,modpost) # Declare generated files as targets for modpost -$(modules:.ko=.mod.c): __modpost ; - +$(modules:.ko=.mod.c): modules-modpost # Step 5), compile all *.mod.c files @@ -145,10 +149,10 @@ FORCE: # optimization, we don't need to read them if the target does not # exist, we will rebuild anyway in that case. 
-cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd)) +existing-targets := $(wildcard $(sort $(targets))) + +-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) -ifneq ($(cmd_files),) - include $(cmd_files) endif .PHONY: $(PHONY) diff --git a/scripts/gen_compile_commands.py b/scripts/gen_compile_commands.py index 7915823b92a5..c458696ef3a7 100755 --- a/scripts/gen_compile_commands.py +++ b/scripts/gen_compile_commands.py @@ -21,9 +21,9 @@ _LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$' _VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] # A kernel build generally has over 2000 entries in its compile_commands.json -# database. If this code finds 500 or fewer, then warn the user that they might +# database. If this code finds 300 or fewer, then warn the user that they might # not have all the .cmd files, and they might need to compile the kernel. -_LOW_COUNT_THRESHOLD = 500 +_LOW_COUNT_THRESHOLD = 300 def parse_arguments(): diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index 47f6f3ea0771..bbaf29386995 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -23,6 +23,12 @@ TMPFILE=$OUTFILE.tmp trap 'rm -f $OUTFILE $TMPFILE' EXIT +# SPDX-License-Identifier with GPL variants must have "WITH Linux-syscall-note" +if [ -n "$(sed -n -e "/SPDX-License-Identifier:.*GPL-/{/WITH Linux-syscall-note/!p}" $INFILE)" ]; then + echo "error: $INFILE: missing \"WITH Linux-syscall-note\" for SPDX-License-Identifier" >&2 + exit 1 +fi + sed -E -e ' s/([[:space:](])(__user|__force|__iomem)[[:space:]]/\1/g s/__attribute_const__([[:space:]]|$)/\1/g diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 1134892599da..3569d2dec37c 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -848,6 +848,7 @@ int conf_write(const char *name) const char *str; char tmpname[PATH_MAX + 1], oldname[PATH_MAX + 1]; char *env; + int i; bool need_newline = false; if (!name) @@ -930,6 +931,9 @@ next: } fclose(out); + for_all_symbols(i, sym) + sym->flags &= ~SYMBOL_WRITTEN; + if (*tmpname) { if (is_same(name, tmpname)) { conf_message("No change to %s", name); diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index a7124f895b24..915775eb2921 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -210,7 +210,7 @@ info LD vmlinux.o modpost_link vmlinux.o # modpost vmlinux.o to check for section mismatches -${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o +${MAKE} -f "${srctree}/scripts/Makefile.modpost" MODPOST_VMLINUX=1 info MODINFO modules.builtin.modinfo ${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install index f230e65329a2..3b638c0e1a4f 100755 --- a/scripts/sphinx-pre-install +++ b/scripts/sphinx-pre-install @@ -83,6 +83,17 @@ sub check_missing(%) foreach my $prog (sort keys %missing) { my $is_optional = $missing{$prog}; + # At least on some LTS distros like CentOS 7, texlive doesn't + # provide all packages we need. When such distros are + # detected, we have to disable PDF output. 
+ # + # So, we need to ignore the packages that distros would + # need for LaTeX to work + if ($is_optional == 2 && !$pdf) { + $optional--; + next; + } + if ($is_optional) { print "Warning: better to also install \"$prog\".\n"; } else { @@ -333,10 +344,13 @@ sub give_debian_hints() if ($pdf) { check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", - "fonts-dejavu", 1); + "fonts-dejavu", 2); + + check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc", + "fonts-noto-cjk", 2); } - check_program("dvipng", 1) if ($pdf); + check_program("dvipng", 2) if ($pdf); check_missing(\%map); return if (!$need && !$optional); @@ -363,6 +377,7 @@ sub give_redhat_hints() my @fedora_tex_pkgs = ( "texlive-collection-fontsrecommended", "texlive-collection-latex", + "texlive-xecjk", "dejavu-sans-fonts", "dejavu-serif-fonts", "dejavu-sans-mono-fonts", @@ -371,22 +386,45 @@ sub give_redhat_hints() # # Checks valid for RHEL/CentOS version 7.x. # - if (! $system_release =~ /Fedora/) { + my $old = 0; + my $rel; + $rel = $1 if ($system_release =~ /release\s+(\d+)/); + + if (!($system_release =~ /Fedora/)) { $map{"virtualenv"} = "python-virtualenv"; - } - my $release; + if ($rel && $rel < 8) { + $old = 1; + $pdf = 0; + + printf("Note: texlive packages on RHEL/CENTOS <= 7 are incomplete. Can't support PDF output\n"); + printf("If you want to build PDF, please read:\n"); + printf("\thttps://www.systutorials.com/241660/how-to-install-tex-live-on-centos-7-linux/\n"); + } + } else { + if ($rel && $rel < 26) { + $old = 1; + } + } + if (!$rel) { + printf("Couldn't identify release number\n"); + $old = 1; + $pdf = 0; + } - $release = $1 if ($system_release =~ /Fedora\s+release\s+(\d+)/); + if ($pdf) { + check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc", + "google-noto-sans-cjk-ttc-fonts", 2); + } - check_rpm_missing(\@fedora26_opt_pkgs, 1) if ($pdf && $release >= 26); - check_rpm_missing(\@fedora_tex_pkgs, 1) if ($pdf); - check_missing_tex(1) if ($pdf); + check_rpm_missing(\@fedora26_opt_pkgs, 2) if ($pdf && !$old); + check_rpm_missing(\@fedora_tex_pkgs, 2) if ($pdf); + check_missing_tex(2) if ($pdf); check_missing(\%map); return if (!$need && !$optional); - if ($release >= 18) { + if (!$old) { # dnf, for Fedora 18+ printf("You should run:\n\n\tsudo dnf install -y $install\n"); } else { @@ -425,8 +463,15 @@ sub give_opensuse_hints() "texlive-zapfding", ); - check_rpm_missing(\@suse_tex_pkgs, 1) if ($pdf); - check_missing_tex(1) if ($pdf); + $map{"latexmk"} = "texlive-latexmk-bin"; + + # FIXME: add support for installing CJK fonts + # + # I tried hard, but was unable to find a way to install + # "Noto Sans CJK SC" on openSUSE + + check_rpm_missing(\@suse_tex_pkgs, 2) if ($pdf); + check_missing_tex(2) if ($pdf); check_missing(\%map); return if (!$need && !$optional); @@ -450,7 +495,14 @@ sub give_mageia_hints() "texlive-fontsextra", ); - check_rpm_missing(\@tex_pkgs, 1) if ($pdf); + $map{"latexmk"} = "texlive-collection-basic"; + + if ($pdf) { + check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc", + "google-noto-sans-cjk-ttc-fonts", 2); + } + + check_rpm_missing(\@tex_pkgs, 2) if ($pdf); check_missing(\%map); return if (!$need && !$optional); @@ -473,7 +525,13 @@ sub give_arch_linux_hints() "texlive-latexextra", "ttf-dejavu", ); - check_pacman_missing(\@archlinux_tex_pkgs, 1) if ($pdf); + check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf); + + if ($pdf) { + check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc", + 
"noto-fonts-cjk", 2); + } + check_missing(\%map); return if (!$need && !$optional); @@ -492,15 +550,31 @@ sub give_gentoo_hints() ); check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf", - "media-fonts/dejavu", 1) if ($pdf); + "media-fonts/dejavu", 2) if ($pdf); + + if ($pdf) { + check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf", + "media-fonts/noto-cjk", 2); + } check_missing(\%map); return if (!$need && !$optional); printf("You should run:\n\n"); - printf("\tsudo su -c 'echo \"media-gfx/imagemagick svg png\" > /etc/portage/package.use/imagemagick'\n"); - printf("\tsudo su -c 'echo \"media-gfx/graphviz cairo pdf\" > /etc/portage/package.use/graphviz'\n"); + + my $imagemagick = "media-gfx/imagemagick svg png"; + my $cairo = "media-gfx/graphviz cairo pdf"; + my $portage_imagemagick = "/etc/portage/package.use/imagemagick"; + my $portage_cairo = "/etc/portage/package.use/graphviz"; + + if (qx(cat $portage_imagemagick) ne "$imagemagick\n") { + printf("\tsudo su -c 'echo \"$imagemagick\" > $portage_imagemagick'\n") + } + if (qx(cat $portage_cairo) ne "$cairo\n") { + printf("\tsudo su -c 'echo \"$cairo\" > $portage_cairo'\n"); + } + printf("\tsudo emerge --ask $install\n"); } @@ -560,7 +634,7 @@ sub check_distros() my %map = ( "sphinx-build" => "sphinx" ); - check_missing_tex(1) if ($pdf); + check_missing_tex(2) if ($pdf); check_missing(\%map); print "I don't know distro $system_release.\n"; print "So, I can't provide you a hint with the install procedure.\n"; @@ -589,11 +663,13 @@ sub check_needs() check_program("make", 0); check_program("gcc", 0); check_python_module("sphinx_rtd_theme", 1) if (!$virtualenv); - check_program("xelatex", 1) if ($pdf); check_program("dot", 1); check_program("convert", 1); - check_program("rsvg-convert", 1) if ($pdf); - check_program("latexmk", 1) if ($pdf); + + # Extra PDF files - should use 2 for is_optional + check_program("xelatex", 2) if ($pdf); + check_program("rsvg-convert", 2) if ($pdf); + check_program("latexmk", 2) if ($pdf); check_distros(); diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index a1ffe2eb4d5f..af4c979b38ee 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -61,6 +61,7 @@ choice config GCC_PLUGIN_STRUCTLEAK_BYREF bool "zero-init structs passed by reference (strong)" depends on GCC_PLUGINS + depends on !(KASAN && KASAN_STACK=1) select GCC_PLUGIN_STRUCTLEAK help Zero-initialize any structures on the stack that may @@ -70,9 +71,15 @@ choice exposures, like CVE-2017-1000410: https://git.kernel.org/linus/06e7e776ca4d3654 + As a side-effect, this keeps a lot of variables on the + stack that can otherwise be optimized out, so combining + this with CONFIG_KASAN_STACK can lead to a stack overflow + and is disallowed. + config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL bool "zero-init anything passed by reference (very strong)" depends on GCC_PLUGINS + depends on !(KASAN && KASAN_STACK=1) select GCC_PLUGIN_STRUCTLEAK help Zero-initialize any stack variables that may be passed diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 624ccc6ac744..f8efaa9f647c 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -272,6 +272,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2) return v; } +static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap); + /* * Initialize a policy database structure. 
*/ @@ -319,8 +321,10 @@ static int policydb_init(struct policydb *p) out: hashtab_destroy(p->filename_trans); hashtab_destroy(p->range_tr); - for (i = 0; i < SYM_NUM; i++) + for (i = 0; i < SYM_NUM; i++) { + hashtab_map(p->symtab[i].table, destroy_f[i], NULL); hashtab_destroy(p->symtab[i].table); + } return rc; } diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index e63a90ff2728..1f0a6eaa2d6a 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -286,6 +286,11 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context, ++count; } + /* bail out if we already reached max entries */ + rc = -EOVERFLOW; + if (count >= SIDTAB_MAX) + goto out_unlock; + /* insert context into new entry */ rc = -ENOMEM; dst = sidtab_do_lookup(s, count, 1); diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c index 7b977b753a03..7985dd8198b6 100644 --- a/sound/ac97/bus.c +++ b/sound/ac97/bus.c @@ -122,17 +122,12 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx, vendor_id); ret = device_add(&codec->dev); - if (ret) - goto err_free_codec; + if (ret) { + put_device(&codec->dev); + return ret; + } return 0; -err_free_codec: - of_node_put(codec->dev.of_node); - put_device(&codec->dev); - kfree(codec); - ac97_ctrl->codecs[idx] = NULL; - - return ret; } unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv, diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index 99b882158705..41905afada63 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -574,10 +574,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg) stream->metadata_set = false; stream->next_track = false; - if (stream->direction == SND_COMPRESS_PLAYBACK) - stream->runtime->state = SNDRV_PCM_STATE_SETUP; - else - stream->runtime->state = SNDRV_PCM_STATE_PREPARED; + stream->runtime->state = SNDRV_PCM_STATE_SETUP; } else { return -EPERM; } @@ -693,8 +690,17 @@ static int snd_compr_start(struct snd_compr_stream *stream) { int retval; - if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED) + switch (stream->runtime->state) { + case SNDRV_PCM_STATE_SETUP: + if (stream->direction != SND_COMPRESS_CAPTURE) + return -EPERM; + break; + case SNDRV_PCM_STATE_PREPARED: + break; + default: return -EPERM; + } + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START); if (!retval) stream->runtime->state = SNDRV_PCM_STATE_RUNNING; @@ -705,9 +711,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream) { int retval; - if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || - stream->runtime->state == SNDRV_PCM_STATE_SETUP) + switch (stream->runtime->state) { + case SNDRV_PCM_STATE_OPEN: + case SNDRV_PCM_STATE_SETUP: + case SNDRV_PCM_STATE_PREPARED: return -EPERM; + default: + break; + } + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); if (!retval) { snd_compr_drain_notify(stream); @@ -795,9 +807,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream) { int retval; - if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || - stream->runtime->state == SNDRV_PCM_STATE_SETUP) + switch (stream->runtime->state) { + case SNDRV_PCM_STATE_OPEN: + case SNDRV_PCM_STATE_SETUP: + case SNDRV_PCM_STATE_PREPARED: + case SNDRV_PCM_STATE_PAUSED: return -EPERM; + case SNDRV_PCM_STATE_XRUN: + return -EPIPE; + default: + break; + } retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN); if (retval) { @@ -817,6 +837,10 @@ static int snd_compr_next_track(struct snd_compr_stream 
*stream) if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) return -EPERM; + /* next track doesn't have any meaning for capture streams */ + if (stream->direction == SND_COMPRESS_CAPTURE) + return -EPERM; + /* you can signal next track if this is intended to be a gapless stream * and current track metadata is set */ @@ -834,9 +858,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream) static int snd_compr_partial_drain(struct snd_compr_stream *stream) { int retval; - if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || - stream->runtime->state == SNDRV_PCM_STATE_SETUP) + + switch (stream->runtime->state) { + case SNDRV_PCM_STATE_OPEN: + case SNDRV_PCM_STATE_SETUP: + case SNDRV_PCM_STATE_PREPARED: + case SNDRV_PCM_STATE_PAUSED: + return -EPERM; + case SNDRV_PCM_STATE_XRUN: + return -EPIPE; + default: + break; + } + + /* partial drain doesn't have any meaning for capture streams */ + if (stream->direction == SND_COMPRESS_CAPTURE) return -EPERM; + /* stream can be drained only when next track has been signalled */ if (stream->next_track == false) return -EPERM; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 860543a4c840..703857aab00f 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -77,7 +77,7 @@ void snd_pcm_group_init(struct snd_pcm_group *group) spin_lock_init(&group->lock); mutex_init(&group->mutex); INIT_LIST_HEAD(&group->substreams); - refcount_set(&group->refs, 0); + refcount_set(&group->refs, 1); } /* define group lock helpers */ @@ -1096,8 +1096,7 @@ static void snd_pcm_group_unref(struct snd_pcm_group *group, if (!group) return; - do_free = refcount_dec_and_test(&group->refs) && - list_empty(&group->substreams); + do_free = refcount_dec_and_test(&group->refs); snd_pcm_group_unlock(group, substream->pcm->nonatomic); if (do_free) kfree(group); @@ -1874,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, if (!to_check) break; /* all drained */ init_waitqueue_entry(&wait, current); + set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&to_check->sleep, &wait); snd_pcm_stream_unlock_irq(substream); if (runtime->no_period_wakeup) @@ -1886,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, } tout = msecs_to_jiffies(tout * 1000); } - tout = schedule_timeout_interruptible(tout); + tout = schedule_timeout(tout); snd_pcm_stream_lock_irq(substream); group = snd_pcm_stream_group_ref(substream); @@ -2020,6 +2020,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) snd_pcm_group_lock_irq(target_group, nonatomic); snd_pcm_stream_lock(substream1); snd_pcm_group_assign(substream1, target_group); + refcount_inc(&target_group->refs); snd_pcm_stream_unlock(substream1); snd_pcm_group_unlock_irq(target_group, nonatomic); _end: @@ -2056,13 +2057,14 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream) snd_pcm_group_lock_irq(group, nonatomic); relink_to_local(substream); + refcount_dec(&group->refs); /* detach the last stream, too */ if (list_is_singular(&group->substreams)) { relink_to_local(list_first_entry(&group->substreams, struct snd_pcm_substream, link_list)); - do_free = !refcount_read(&group->refs); + do_free = refcount_dec_and_test(&group->refs); } snd_pcm_group_unlock_irq(group, nonatomic); diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 1192c7561d62..3c2db3816029 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus) if (!acomp) return -ENODEV; if 
(!acomp->ops) { - request_module("i915"); - /* 60s timeout */ - wait_for_completion_timeout(&bind_complete, - msecs_to_jiffies(60 * 1000)); + if (!IS_ENABLED(CONFIG_MODULES) || + !request_module("i915")) { + /* 60s timeout */ + wait_for_completion_timeout(&bind_complete, + msecs_to_jiffies(60 * 1000)); + } } if (!acomp->ops) { dev_info(bus->dev, "couldn't bind with audio component\n"); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index e30e86ca6b72..51f10ed9bc43 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2942,7 +2942,7 @@ static int hda_codec_runtime_resume(struct device *dev) static int hda_codec_force_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); - bool forced_resume = !codec->relaxed_resume; + bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used; int ret; /* The get/put pair below enforces the runtime resume even if the diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index cb8b0945547c..1e14d7270adf 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -313,11 +313,10 @@ enum { #define AZX_DCAPS_INTEL_SKYLAKE \ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ + AZX_DCAPS_SYNC_WRITE |\ AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT) -#define AZX_DCAPS_INTEL_BROXTON \ - (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ - AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT) +#define AZX_DCAPS_INTEL_BROXTON AZX_DCAPS_INTEL_SKYLAKE /* quirks for ATI SB / AMD Hudson */ #define AZX_DCAPS_PRESET_ATI_SB \ diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 4f8d0845ee1e..f299f137eaea 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1083,6 +1083,7 @@ static int patch_conexant_auto(struct hda_codec *codec) */ static const struct hda_device_id snd_hda_id_conexant[] = { + HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), diff --git a/sound/usb/helper.c b/sound/usb/helper.c index 71d5f540334a..4c12cc5b53fd 100644 --- a/sound/usb/helper.c +++ b/sound/usb/helper.c @@ -72,7 +72,7 @@ int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe) struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(dev, pipe); - if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) + if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) return -EINVAL; return 0; } diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c index f0662bd4e50f..27bf61c177c0 100644 --- a/sound/usb/line6/podhd.c +++ b/sound/usb/line6/podhd.c @@ -368,7 +368,7 @@ static const struct line6_properties podhd_properties_table[] = { .name = "POD HD500", .capabilities = LINE6_CAP_PCM | LINE6_CAP_HWMON, - .altsetting = 1, + .altsetting = 0, .ep_ctrl_r = 0x81, .ep_ctrl_w = 0x01, .ep_audio_r = 0x86, diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c index 0d24c72c155f..ed158f04de80 100644 --- a/sound/usb/line6/variax.c +++ b/sound/usb/line6/variax.c @@ -244,5 +244,5 @@ static struct usb_driver variax_driver = { module_usb_driver(variax_driver); -MODULE_DESCRIPTION("Vairax Workbench USB driver"); +MODULE_DESCRIPTION("Variax Workbench USB driver"); MODULE_LICENSE("GPL"); diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 
4602464ebdfb..a4217c1a5d01 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -214,6 +214,18 @@ struct kvm_vcpu_events { #define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ KVM_REG_ARM_FW | ((r) & 0xffff)) #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1) + /* Higher values mean better protection. */ +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) + /* Higher values mean better protection. */ +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index d819a3e8b552..9a507716ae2f 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -229,6 +229,16 @@ struct kvm_vcpu_events { #define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ KVM_REG_ARM_FW | ((r) & 0xffff)) #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) /* SVE registers */ #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) diff --git a/tools/arch/powerpc/include/uapi/asm/mman.h b/tools/arch/powerpc/include/uapi/asm/mman.h index f33105bc5ca6..8601d824a9c6 100644 --- a/tools/arch/powerpc/include/uapi/asm/mman.h +++ b/tools/arch/powerpc/include/uapi/asm/mman.h @@ -4,12 +4,8 @@ #define MAP_DENYWRITE 0x0800 #define MAP_EXECUTABLE 0x1000 #define MAP_GROWSDOWN 0x0100 -#define MAP_HUGETLB 0x40000 #define MAP_LOCKED 0x80 -#define MAP_NONBLOCK 0x10000 #define MAP_NORESERVE 0x40 -#define MAP_POPULATE 0x8000 -#define MAP_STACK 0x20000 #include <uapi/asm-generic/mman-common.h> /* MAP_32BIT is undefined on powerpc, fix it for perf */ #define MAP_32BIT 0 diff --git a/tools/arch/sparc/include/uapi/asm/mman.h b/tools/arch/sparc/include/uapi/asm/mman.h index 38920eed8cbf..7b94dccc843d 100644 --- a/tools/arch/sparc/include/uapi/asm/mman.h +++ b/tools/arch/sparc/include/uapi/asm/mman.h @@ -4,12 +4,8 @@ #define MAP_DENYWRITE 0x0800 #define MAP_EXECUTABLE 0x1000 #define MAP_GROWSDOWN 0x0200 -#define MAP_HUGETLB 0x40000 #define MAP_LOCKED 0x100 -#define MAP_NONBLOCK 0x10000 #define MAP_NORESERVE 0x40 -#define MAP_POPULATE 0x8000 -#define MAP_STACK 0x20000 #include <uapi/asm-generic/mman-common.h> /* MAP_32BIT is undefined on sparc, fix it for perf */ #define MAP_32BIT 0 diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index d6ab5b4d15e5..503d3f42da16 
100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -378,10 +378,11 @@ struct kvm_sync_regs { struct kvm_vcpu_events events; }; -#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) -#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) -#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) -#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) +#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) +#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) +#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) +#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) #define KVM_STATE_NESTED_FORMAT_VMX 0 #define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */ @@ -432,4 +433,17 @@ struct kvm_nested_state { } data; }; +/* for KVM_CAP_PMU_EVENT_FILTER */ +struct kvm_pmu_event_filter { + __u32 action; + __u32 nevents; + __u32 fixed_counter_bitmap; + __u32 flags; + __u32 pad[4]; + __u64 events[0]; +}; + +#define KVM_PMU_EVENT_ALLOW 0 +#define KVM_PMU_EVENT_DENY 1 + #endif /* _ASM_X86_KVM_H */ diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index d213ec5c3766..f0b0c90dd398 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h @@ -146,7 +146,6 @@ #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2 -#define VMX_ABORT_VMCS_CORRUPTED 3 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 #endif /* _UAPIVMX_H */ diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index abd238d0f7a4..63b1f506ea67 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h @@ -19,15 +19,18 @@ #define MAP_TYPE 0x0f /* Mask for type of mapping */ #define MAP_FIXED 0x10 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x20 /* don't use a file */ -#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED -# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */ -#else -# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ -#endif -/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ +/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */ +#define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x010000 /* do not block on IO */ +#define MAP_STACK 0x020000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x040000 /* create a huge page mapping */ +#define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be + * uninitialized */ + /* * Flags for mlock */ diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h index 36c197fc44a0..406f7718f9ad 100644 --- a/tools/include/uapi/asm-generic/mman.h +++ b/tools/include/uapi/asm-generic/mman.h @@ -9,13 +9,11 @@ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ -#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ -#define MAP_SYNC 0x80000 /* perform synchronous page faults for the 
mapping */ -/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ +/* + * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h + * for MAP_HUGETLB usage + */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index a87904daf103..1be0e798e362 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -844,9 +844,15 @@ __SYSCALL(__NR_fsconfig, sys_fsconfig) __SYSCALL(__NR_fsmount, sys_fsmount) #define __NR_fspick 433 __SYSCALL(__NR_fspick, sys_fspick) +#define __NR_pidfd_open 434 +__SYSCALL(__NR_pidfd_open, sys_pidfd_open) +#ifdef __ARCH_WANT_SYS_CLONE3 +#define __NR_clone3 435 +__SYSCALL(__NR_clone3, sys_clone3) +#endif #undef __NR_syscalls -#define __NR_syscalls 434 +#define __NR_syscalls 436 /* * 32 bit systems traditionally used different diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 661d73f9a919..8a5b2f8f8eb9 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -50,6 +50,7 @@ typedef unsigned int drm_handle_t; #else /* One of the BSDs */ +#include <stdint.h> #include <sys/ioccom.h> #include <sys/types.h> typedef int8_t __s8; diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 3a73f5316766..328d05e77d9f 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class { struct i915_engine_class_instance { __u16 engine_class; /* see enum drm_i915_gem_engine_class */ __u16 engine_instance; +#define I915_ENGINE_CLASS_INVALID_NONE -1 +#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2 }; /** @@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea { #define DRM_I915_PERF_ADD_CONFIG 0x37 #define DRM_I915_PERF_REMOVE_CONFIG 0x38 #define DRM_I915_QUERY 0x39 +#define DRM_I915_GEM_VM_CREATE 0x3a +#define DRM_I915_GEM_VM_DESTROY 0x3b /* Must be kept compact -- no holes */ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) @@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) +#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control) +#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -598,6 +604,12 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_MMAP_GTT_COHERENT 52 +/* + * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel + * execution through use of explicit fence support. + * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT. 
+ */ +#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53 /* Must be kept compact -- no holes and well documented */ typedef struct drm_i915_getparam { @@ -1120,7 +1132,16 @@ struct drm_i915_gem_execbuffer2 { */ #define I915_EXEC_FENCE_ARRAY (1<<19) -#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1)) +/* + * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent + * a sync_file fd to wait upon (in a nonblocking manner) prior to executing + * the batch. + * + * Returns -EINVAL if the sync_file fd cannot be found. + */ +#define I915_EXEC_FENCE_SUBMIT (1 << 20) + +#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1)) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ @@ -1464,8 +1485,9 @@ struct drm_i915_gem_context_create_ext { __u32 ctx_id; /* output: id of new context*/ __u32 flags; #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) +#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1) #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ - (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1)) + (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1)) __u64 extensions; }; @@ -1507,6 +1529,41 @@ struct drm_i915_gem_context_param { * On creation, all new contexts are marked as recoverable. */ #define I915_CONTEXT_PARAM_RECOVERABLE 0x8 + + /* + * The id of the associated virtual memory address space (ppGTT) of + * this context. Can be retrieved and passed to another context + * (on the same fd) for both to use the same ppGTT and so share + * address layouts, and avoid reloading the page tables on context + * switches between themselves. + * + * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY. + */ +#define I915_CONTEXT_PARAM_VM 0x9 + +/* + * I915_CONTEXT_PARAM_ENGINES: + * + * Bind this context to operate on this subset of available engines. Henceforth, + * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as + * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0] + * and upwards. Slots 0...N are filled in using the specified (class, instance). + * Use + * engine_class: I915_ENGINE_CLASS_INVALID, + * engine_instance: I915_ENGINE_CLASS_INVALID_NONE + * to specify a gap in the array that can be filled in later, e.g. by a + * virtual engine used for load balancing. + * + * Setting the number of engines bound to the context to 0, by passing a zero + * sized argument, will revert back to default settings. + * + * See struct i915_context_param_engines. + * + * Extensions: + * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE) + * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND) + */ +#define I915_CONTEXT_PARAM_ENGINES 0xa /* Must be kept compact -- no holes and well documented */ __u64 value; @@ -1540,9 +1597,10 @@ struct drm_i915_gem_context_param_sseu { struct i915_engine_class_instance engine; /* - * Unused for now. Must be cleared to zero. + * Unknown flags must be cleared to zero. */ __u32 flags; +#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0) /* * Mask of slices to enable for the context. Valid values are a subset @@ -1570,12 +1628,115 @@ struct drm_i915_gem_context_param_sseu { __u32 rsvd; }; +/* + * i915_context_engines_load_balance: + * + * Enable load balancing across this set of engines. + * + * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when + * used will proxy the execbuffer request onto one of the set of engines + * in such a way as to distribute the load evenly across the set. 
+ * + * The set of engines must be compatible (e.g. the same HW class) as they + * will share the same logical GPU context and ring. + * + * To intermix rendering with the virtual engine and direct rendering onto + * the backing engines (bypassing the load balancing proxy), the context must + * be defined to use a single timeline for all engines. + */ +struct i915_context_engines_load_balance { + struct i915_user_extension base; + + __u16 engine_index; + __u16 num_siblings; + __u32 flags; /* all undefined flags must be zero */ + + __u64 mbz64; /* reserved for future use; must be zero */ + + struct i915_engine_class_instance engines[0]; +} __attribute__((packed)); + +#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \ + struct i915_user_extension base; \ + __u16 engine_index; \ + __u16 num_siblings; \ + __u32 flags; \ + __u64 mbz64; \ + struct i915_engine_class_instance engines[N__]; \ +} __attribute__((packed)) name__ + +/* + * i915_context_engines_bond: + * + * Constructed bonded pairs for execution within a virtual engine. + * + * All engines are equal, but some are more equal than others. Given + * the distribution of resources in the HW, it may be preferable to run + * a request on a given subset of engines in parallel to a request on a + * specific engine. We enable this selection of engines within a virtual + * engine by specifying bonding pairs, for any given master engine we will + * only execute on one of the corresponding siblings within the virtual engine. + * + * To execute a request in parallel on the master engine and a sibling requires + * coordination with a I915_EXEC_FENCE_SUBMIT. + */ +struct i915_context_engines_bond { + struct i915_user_extension base; + + struct i915_engine_class_instance master; + + __u16 virtual_index; /* index of virtual engine in ctx->engines[] */ + __u16 num_bonds; + + __u64 flags; /* all undefined flags must be zero */ + __u64 mbz64[4]; /* reserved for future use; must be zero */ + + struct i915_engine_class_instance engines[0]; +} __attribute__((packed)); + +#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \ + struct i915_user_extension base; \ + struct i915_engine_class_instance master; \ + __u16 virtual_index; \ + __u16 num_bonds; \ + __u64 flags; \ + __u64 mbz64[4]; \ + struct i915_engine_class_instance engines[N__]; \ +} __attribute__((packed)) name__ + +struct i915_context_param_engines { + __u64 extensions; /* linked chain of extension blocks, 0 terminates */ +#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */ +#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */ + struct i915_engine_class_instance engines[0]; +} __attribute__((packed)); + +#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \ + __u64 extensions; \ + struct i915_engine_class_instance engines[N__]; \ +} __attribute__((packed)) name__ + struct drm_i915_gem_context_create_ext_setparam { #define I915_CONTEXT_CREATE_EXT_SETPARAM 0 struct i915_user_extension base; struct drm_i915_gem_context_param param; }; +struct drm_i915_gem_context_create_ext_clone { +#define I915_CONTEXT_CREATE_EXT_CLONE 1 + struct i915_user_extension base; + __u32 clone_id; + __u32 flags; +#define I915_CONTEXT_CLONE_ENGINES (1u << 0) +#define I915_CONTEXT_CLONE_FLAGS (1u << 1) +#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2) +#define I915_CONTEXT_CLONE_SSEU (1u << 3) +#define I915_CONTEXT_CLONE_TIMELINE (1u << 4) +#define I915_CONTEXT_CLONE_VM (1u << 5) +#define I915_CONTEXT_CLONE_UNKNOWN 
-(I915_CONTEXT_CLONE_VM << 1) + __u64 rsvd; +}; + struct drm_i915_gem_context_destroy { __u32 ctx_id; __u32 pad; @@ -1821,6 +1982,7 @@ struct drm_i915_perf_oa_config { struct drm_i915_query_item { __u64 query_id; #define DRM_I915_QUERY_TOPOLOGY_INFO 1 +#define DRM_I915_QUERY_ENGINE_INFO 2 /* Must be kept compact -- no holes and well documented */ /* @@ -1919,6 +2081,47 @@ struct drm_i915_query_topology_info { __u8 data[]; }; +/** + * struct drm_i915_engine_info + * + * Describes one engine and it's capabilities as known to the driver. + */ +struct drm_i915_engine_info { + /** Engine class and instance. */ + struct i915_engine_class_instance engine; + + /** Reserved field. */ + __u32 rsvd0; + + /** Engine flags. */ + __u64 flags; + + /** Capabilities of this engine. */ + __u64 capabilities; +#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) +#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) + + /** Reserved fields. */ + __u64 rsvd1[4]; +}; + +/** + * struct drm_i915_query_engine_info + * + * Engine info query enumerates all engines known to the driver by filling in + * an array of struct drm_i915_engine_info structures. + */ +struct drm_i915_query_engine_info { + /** Number of struct drm_i915_engine_info structs following. */ + __u32 num_engines; + + /** MBZ */ + __u32 rsvd[3]; + + /** Marker for drm_i915_engine_info structures. */ + struct drm_i915_engine_info engines[]; +}; + #if defined(__cplusplus) } #endif diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 7d113a9602f0..4a8c02cafa9a 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -695,6 +695,7 @@ enum { IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ + IFLA_VF_BROADCAST, /* VF broadcast */ __IFLA_VF_MAX, }; @@ -705,6 +706,10 @@ struct ifla_vf_mac { __u8 mac[32]; /* MAX_ADDR_LEN */ }; +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + struct ifla_vf_vlan { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index c2152f3dd02d..5e3f12d5359e 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -116,7 +116,7 @@ struct kvm_irq_level { * ACPI gsi notion of irq. * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. - * For ARM: See Documentation/virtual/kvm/api.txt + * For ARM: See Documentation/virt/kvm/api.txt */ union { __u32 irq; @@ -995,6 +995,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_SVE 170 #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 +#define KVM_CAP_PMU_EVENT_FILTER 173 #ifdef KVM_CAP_IRQ_ROUTING @@ -1085,7 +1086,7 @@ struct kvm_xen_hvm_config { * * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies * the irqfd to operate in resampling mode for level triggered interrupt - * emulation. See Documentation/virtual/kvm/api.txt. + * emulation. See Documentation/virt/kvm/api.txt. 
*/ #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) @@ -1329,6 +1330,8 @@ struct kvm_s390_ucas_mapping { #define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) /* Available with KVM_CAP_PPC_GET_CPU_CHAR */ #define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) +/* Available with KVM_CAP_PMU_EVENT_FILTER */ +#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h index ed4ee170bee2..b3105ac1381a 100644 --- a/tools/include/uapi/linux/sched.h +++ b/tools/include/uapi/linux/sched.h @@ -2,6 +2,8 @@ #ifndef _UAPI_LINUX_SCHED_H #define _UAPI_LINUX_SCHED_H +#include <linux/types.h> + /* * cloning flags: */ @@ -32,6 +34,20 @@ #define CLONE_IO 0x80000000 /* Clone io context */ /* + * Arguments for the clone3 syscall + */ +struct clone_args { + __aligned_u64 flags; + __aligned_u64 pidfd; + __aligned_u64 child_tid; + __aligned_u64 parent_tid; + __aligned_u64 exit_signal; + __aligned_u64 stack; + __aligned_u64 stack_size; + __aligned_u64 tls; +}; + +/* * Scheduling policies */ #define SCHED_NORMAL 0 @@ -51,9 +67,21 @@ #define SCHED_FLAG_RESET_ON_FORK 0x01 #define SCHED_FLAG_RECLAIM 0x02 #define SCHED_FLAG_DL_OVERRUN 0x04 +#define SCHED_FLAG_KEEP_POLICY 0x08 +#define SCHED_FLAG_KEEP_PARAMS 0x10 +#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20 +#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40 + +#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \ + SCHED_FLAG_KEEP_PARAMS) + +#define SCHED_FLAG_UTIL_CLAMP (SCHED_FLAG_UTIL_CLAMP_MIN | \ + SCHED_FLAG_UTIL_CLAMP_MAX) #define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \ SCHED_FLAG_RECLAIM | \ - SCHED_FLAG_DL_OVERRUN) + SCHED_FLAG_DL_OVERRUN | \ + SCHED_FLAG_KEEP_ALL | \ + SCHED_FLAG_UTIL_CLAMP) #endif /* _UAPI_LINUX_SCHED_H */ diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h index 964e87217be4..78efe870c2b7 100644 --- a/tools/include/uapi/linux/usbdevice_fs.h +++ b/tools/include/uapi/linux/usbdevice_fs.h @@ -76,6 +76,26 @@ struct usbdevfs_connectinfo { unsigned char slow; }; +struct usbdevfs_conninfo_ex { + __u32 size; /* Size of the structure from the kernel's */ + /* point of view. Can be used by userspace */ + /* to determine how much data can be */ + /* used/trusted. */ + __u32 busnum; /* USB bus number, as enumerated by the */ + /* kernel, the device is connected to. */ + __u32 devnum; /* Device address on the bus. */ + __u32 speed; /* USB_SPEED_* constants from ch9.h */ + __u8 num_ports; /* Number of ports the device is connected */ + /* to on the way to the root hub. It may */ + /* be bigger than size of 'ports' array so */ + /* userspace can detect overflows. */ + __u8 ports[7]; /* List of ports on the way from the root */ + /* hub to the device. Current limit in */ + /* USB specification is 7 tiers (root hub, */ + /* 5 intermediate hubs, device), which */ + /* gives at most 6 port entries. 
*/ +}; + #define USBDEVFS_URB_SHORT_NOT_OK 0x01 #define USBDEVFS_URB_ISO_ASAP 0x02 #define USBDEVFS_URB_BULK_CONTINUATION 0x04 @@ -137,6 +157,7 @@ struct usbdevfs_hub_portinfo { #define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10 #define USBDEVFS_CAP_MMAP 0x20 #define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 +#define USBDEVFS_CAP_CONNINFO_EX 0x80 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */ @@ -197,5 +218,10 @@ struct usbdevfs_streams { #define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams) #define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32) #define USBDEVFS_GET_SPEED _IO('U', 31) +/* + * Returns struct usbdevfs_conninfo_ex; length is variable to allow + * extending size of the data returned. + */ +#define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) #endif /* _UAPI_LINUX_USBDEVICE_FS_H */ diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h index 03748a742146..bae8879cdf58 100644 --- a/tools/lib/bpf/hashmap.h +++ b/tools/lib/bpf/hashmap.h @@ -10,6 +10,11 @@ #include <stdbool.h> #include <stddef.h> +#ifdef __GLIBC__ +#include <bits/wordsize.h> +#else +#include <bits/reg.h> +#endif #include "libbpf_internal.h" static inline size_t hash_bits(size_t h, int bits) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 5f26620f13f5..176f2f084060 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1946,6 +1946,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, struct alternative *alt; struct instruction *insn, *next_insn; struct section *sec; + u8 visited; int ret; insn = first; @@ -1972,12 +1973,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, return 1; } + visited = 1 << state.uaccess; if (insn->visited) { if (!insn->hint && !insn_state_match(insn, &state)) return 1; - /* If we were here with AC=0, but now have AC=1, go again */ - if (insn->state.uaccess || !state.uaccess) + if (insn->visited & visited) return 0; } @@ -2024,7 +2025,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, } else insn->state = state; - insn->visited = true; + insn->visited |= visited; if (!insn->ignore_alts) { bool skip_orig = false; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index b881fafcf55d..6d875ca6fce0 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -33,8 +33,9 @@ struct instruction { unsigned int len; enum insn_type type; unsigned long immediate; - bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; + bool alt_group, dead_end, ignore, hint, save, restore, ignore_alts; bool retpoline_safe; + u8 visited; struct symbol *call_dest; struct instruction *jump_dest; struct instruction *first_jump_src; diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index d4e2e18a5881..caaab28f8400 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -228,11 +228,11 @@ OPTIONS With the metric option perf script can compute metrics for sampling periods, similar to perf stat. This requires - specifying a group with multiple metrics with the :S option + specifying a group with multiple events defining metrics with the :S option for perf record. perf will sample on the first event, and - compute metrics for all the events in the group. Please note + print computed metrics for all the events in the group. Please note that the metric computed is averaged over the whole sampling - period, not just for the sample point. 
+ period (since the last sample), not just for the sample point. For sample events it's possible to display misc field with -F +misc option, following letters are displayed for each bit: @@ -384,7 +384,7 @@ include::itrace.txt[] perf script --time 0%-10%,30%-40% --max-blocks:: - Set the maximum number of program blocks to print with brstackasm for + Set the maximum number of program blocks to print with brstackinsn for each sample. --reltime:: diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt index 5f54feb19977..d030c87ed9f5 100644 --- a/tools/perf/Documentation/perf.data-file-format.txt +++ b/tools/perf/Documentation/perf.data-file-format.txt @@ -126,7 +126,7 @@ vendor,family,model,stepping. For example: GenuineIntel,6,69,1 HEADER_TOTAL_MEM = 10, -An uint64_t with the total memory in bytes. +An uint64_t with the total memory in kilobytes. HEADER_CMDLINE = 11, diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index b4e6f9e6204a..c29976eca4a8 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -355,6 +355,8 @@ 431 common fsconfig __x64_sys_fsconfig 432 common fsmount __x64_sys_fsmount 433 common fspick __x64_sys_fspick +434 common pidfd_open __x64_sys_pidfd_open +435 common clone3 __x64_sys_clone3/ptregs # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 6418782951a4..3d0ffd41fb55 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -698,6 +698,16 @@ __cmd_probe(int argc, const char **argv) ret = perf_add_probe_events(params.events, params.nevents); if (ret < 0) { + + /* + * When perf_add_probe_events() fails it calls + * cleanup_perf_probe_events(pevs, npevs), i.e. + * cleanup_perf_probe_events(params.events, params.nevents), which + * will call clear_perf_probe_event(), so set nevents to zero + * to avoid cleanup_params() to call clear_perf_probe_event() again + * on the same pevs. + */ + params.nevents = 0; pr_err_with_code(" Error: Failed to add events.", ret); return ret; } diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 8f24865596af..0140ddb8dd0b 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1059,7 +1059,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp); if (ip == end) { - printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp, + printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, ++insn, fp, &total_cycles); if (PRINT_FIELD(SRCCODE)) printed += print_srccode(thread, x.cpumode, ip); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index b55a534b4de0..352cf39d7c2f 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -607,7 +607,13 @@ try_again: * group leaders. */ read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); - perf_evlist__close(evsel_list); + + /* + * We need to keep evsel_list alive, because it's processed + * later the evsel_list will be closed after. 
+ */ + if (!STAT_RECORD) + perf_evlist__close(evsel_list); return WEXITSTATUS(status); } @@ -1997,6 +2003,7 @@ int cmd_stat(int argc, const char **argv) perf_session__write_header(perf_stat.session, evsel_list, fd, true); } + perf_evlist__close(evsel_list); perf_session__delete(perf_stat.session); } diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh index 930b80f422e8..aa597ae53747 100755 --- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh +++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh @@ -3,10 +3,13 @@ [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ +# also as: +# #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) + printf "static const char *usbdevfs_ioctl_cmds[] = {\n" -regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" -egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \ - sed -r "s/$regex/\2 \1/g" | \ +regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" +egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \ + sed -r "s/$regex/\4 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n\n" printf "#if 0\n" diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index ebb46da4dfe5..52459dd5ad0c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1291,6 +1291,7 @@ static void perf_evsel__free_id(struct perf_evsel *evsel) xyarray__delete(evsel->sample_id); evsel->sample_id = NULL; zfree(&evsel->id); + evsel->ids = 0; } static void perf_evsel__free_config_terms(struct perf_evsel *evsel) @@ -2077,6 +2078,7 @@ void perf_evsel__close(struct perf_evsel *evsel) perf_evsel__close_fd(evsel); perf_evsel__free_fd(evsel); + perf_evsel__free_id(evsel); } int perf_evsel__open_per_cpu(struct perf_evsel *evsel, diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index c24db7f4909c..1903d7ec9797 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -3559,6 +3559,13 @@ int perf_session__read_header(struct perf_session *session) data->file.path); } + if (f_header.attr_size == 0) { + pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n" + "Was the 'perf record' command properly terminated?\n", + data->file.path); + return -EINVAL; + } + nr_attrs = f_header.attrs.size / f_header.attr_size; lseek(fd, f_header.attrs.offset, SEEK_SET); @@ -3639,7 +3646,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, size += sizeof(struct perf_event_header); size += ids * sizeof(u64); - ev = malloc(size); + ev = zalloc(size); if (ev == NULL) return -ENOMEM; @@ -3747,7 +3754,7 @@ int perf_event__process_feature(struct perf_session *session, return 0; ff.buf = (void *)fe->data; - ff.size = event->header.size - sizeof(event->header); + ff.size = event->header.size - sizeof(*fe); ff.ph = &session->header; if (feat_ops[feat].process(&ff, NULL)) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index cd1eb73cfe83..8394d48f8b32 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -2230,6 +2230,7 @@ void clear_perf_probe_event(struct perf_probe_event *pev) field = next; } } + pev->nargs = 0; zfree(&pev->args); } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index d0fd6c614e68..37efa1f43d8b 100644 --- 
a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -36,10 +36,16 @@ static int perf_session__process_compressed_event(struct perf_session *session, void *src; size_t decomp_size, src_size; u64 decomp_last_rem = 0; - size_t decomp_len = session->header.env.comp_mmap_len; + size_t mmap_len, decomp_len = session->header.env.comp_mmap_len; struct decomp *decomp, *decomp_last = session->decomp_last; - decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE, + if (decomp_last) { + decomp_last_rem = decomp_last->size - decomp_last->head; + decomp_len += decomp_last_rem; + } + + mmap_len = sizeof(struct decomp) + decomp_len; + decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); if (decomp == MAP_FAILED) { pr_err("Couldn't allocate memory for decompression\n"); @@ -47,10 +53,10 @@ static int perf_session__process_compressed_event(struct perf_session *session, } decomp->file_pos = file_offset; + decomp->mmap_len = mmap_len; decomp->head = 0; - if (decomp_last) { - decomp_last_rem = decomp_last->size - decomp_last->head; + if (decomp_last_rem) { memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem); decomp->size = decomp_last_rem; } @@ -61,7 +67,7 @@ static int perf_session__process_compressed_event(struct perf_session *session, decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size, &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem); if (!decomp_size) { - munmap(decomp, sizeof(struct decomp) + decomp_len); + munmap(decomp, mmap_len); pr_err("Couldn't decompress data\n"); return -1; } @@ -255,15 +261,15 @@ static void perf_session__delete_threads(struct perf_session *session) static void perf_session__release_decomp_events(struct perf_session *session) { struct decomp *next, *decomp; - size_t decomp_len; + size_t mmap_len; next = session->decomp; - decomp_len = session->header.env.comp_mmap_len; do { decomp = next; if (decomp == NULL) break; next = decomp->next; - munmap(decomp, decomp_len + sizeof(struct decomp)); + mmap_len = decomp->mmap_len; + munmap(decomp, mmap_len); } while (1); } diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index dd8920b745bc..863dbad87849 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -46,6 +46,7 @@ struct perf_session { struct decomp { struct decomp *next; u64 file_pos; + size_t mmap_len; u64 head; size_t size; char data[]; diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 656065af4971..accb1bf1cfd8 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -819,7 +819,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, "stalled cycles per insn", ratio); } else if (have_frontend_stalled) { - print_metric(config, ctxp, NULL, NULL, + out->new_line(config, ctxp); + print_metric(config, ctxp, NULL, "%7.2f ", "stalled cycles per insn", 0); } } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) { diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c index 23bdb9884576..d2202392ffdb 100644 --- a/tools/perf/util/zstd.c +++ b/tools/perf/util/zstd.c @@ -99,8 +99,8 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size while (input.pos < input.size) { ret = ZSTD_decompressStream(data->dstream, &output, &input); if (ZSTD_isError(ret)) { - pr_err("failed to decompress (B): %ld -> %ld : %s\n", - src_size, output.size, ZSTD_getErrorName(ret)); + pr_err("failed to decompress (B): %ld -> %ld, 
dst_size %ld : %s\n", + src_size, output.size, dst_size, ZSTD_getErrorName(ret)); break; } output.dst = dst + output.pos; diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 495066bafbe3..ded7a950dc40 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -32,7 +32,6 @@ EXTRA_WARNINGS += -Wno-system-headers EXTRA_WARNINGS += -Wold-style-definition EXTRA_WARNINGS += -Wpacked EXTRA_WARNINGS += -Wredundant-decls -EXTRA_WARNINGS += -Wshadow EXTRA_WARNINGS += -Wstrict-prototypes EXTRA_WARNINGS += -Wswitch-default EXTRA_WARNINGS += -Wswitch-enum @@ -69,8 +68,16 @@ endif # will do for now and keep the above -Wstrict-aliasing=3 in place # in newer systems. # Needed for the __raw_cmpxchg in tools/arch/x86/include/asm/cmpxchg.h +# +# See https://lkml.org/lkml/2006/11/28/253 and https://gcc.gnu.org/gcc-4.8/changes.html, +# that takes into account Linus's comments (search for Wshadow) for the reasoning about +# -Wshadow not being interesting before gcc 4.8. + ifneq ($(filter 3.%,$(MAKE_VERSION)),) # make-3 EXTRA_WARNINGS += -fno-strict-aliasing +EXTRA_WARNINGS += -Wno-shadow +else +EXTRA_WARNINGS += -Wshadow endif ifneq ($(findstring $(MAKEFLAGS), w),w) diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl index 72525426654b..6fd864935319 100755 --- a/tools/testing/ktest/config-bisect.pl +++ b/tools/testing/ktest/config-bisect.pl @@ -663,7 +663,7 @@ while ($#ARGV >= 0) { } else { - die "Unknow option $opt\n"; + die "Unknown option $opt\n"; } } @@ -732,7 +732,7 @@ if ($start) { } } run_command "cp $good_start $good" or die "failed to copy to $good\n"; - run_command "cp $bad_start $bad" or die "faield to copy to $bad\n"; + run_command "cp $bad_start $bad" or die "failed to copy to $bad\n"; } else { if ( ! -f $good ) { die "Can not find file $good\n"; diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 4c223266299a..bdb69599c4bd 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, size_t len) strtok(NULL, delim); strtok(NULL, delim); - if (strcmp(fs, "cgroup") == 0 && - strcmp(type, "cgroup2") == 0) { + if (strcmp(type, "cgroup2") == 0) { strncpy(root, mount, len); return 0; } diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh index 71231ad2dbfb..47315fe48d5a 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -262,7 +262,7 @@ test_mc_aware() stop_traffic - log_test "UC performace under MC overload" + log_test "UC performance under MC overload" echo "UC-only throughput $(humanize $ucth1)" echo "UC+MC throughput $(humanize $ucth2)" @@ -316,7 +316,7 @@ test_uc_aware() stop_traffic - log_test "MC performace under UC overload" + log_test "MC performance under UC overload" echo " ingress UC throughput $(humanize ${uc_ir})" echo " egress UC throughput $(humanize ${uc_er})" echo " sent $attempts BC ARPs, got $passes responses" diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh index 0a76314b4414..8b944cf042f6 100755 --- a/tools/testing/selftests/kmod/kmod.sh +++ b/tools/testing/selftests/kmod/kmod.sh @@ -28,7 +28,7 @@ # override by exporting to your environment prior running this script. 
# For instance this script assumes you do not have xfs loaded upon boot. # If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this -# script if the filesyste module you don't have loaded upon bootup +# script if the filesystem module you don't have loaded upon bootup # is ext4 instead. Refer to allow_user_defaults() for a list of user # override variables possible. # @@ -263,7 +263,7 @@ config_get_test_result() config_reset() { if ! echo -n "1" >"$DIR"/reset; then - echo "$0: reset shuld have worked" >&2 + echo "$0: reset should have worked" >&2 exit 1 fi } @@ -488,7 +488,7 @@ usage() echo Example uses: echo echo "${TEST_NAME}.sh -- executes all tests" - echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recomended" + echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recommended" echo "${TEST_NAME}.sh -w 0008 -- Watch test ID 0008 run until an error occurs" echo "${TEST_NAME}.sh -s 0008 -- Run test ID 0008 once" echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times" diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh index 30195449c63c..edcfeace4655 100644 --- a/tools/testing/selftests/livepatch/functions.sh +++ b/tools/testing/selftests/livepatch/functions.sh @@ -13,6 +13,14 @@ function log() { echo "$1" > /dev/kmsg } +# skip(msg) - testing can't proceed +# msg - explanation +function skip() { + log "SKIP: $1" + echo "SKIP: $1" >&2 + exit 4 +} + # die(msg) - game over, man # msg - dying words function die() { @@ -43,6 +51,12 @@ function loop_until() { done } +function assert_mod() { + local mod="$1" + + modprobe --dry-run "$mod" &>/dev/null +} + function is_livepatch_mod() { local mod="$1" @@ -75,6 +89,9 @@ function __load_mod() { function load_mod() { local mod="$1"; shift + assert_mod "$mod" || + skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root" + is_livepatch_mod "$mod" && die "use load_lp() to load the livepatch module $mod" @@ -88,6 +105,9 @@ function load_mod() { function load_lp_nowait() { local mod="$1"; shift + assert_mod "$mod" || + skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root" + is_livepatch_mod "$mod" || die "module $mod is not a livepatch" diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c index 7eaa8a3de262..b632965e60eb 100644 --- a/tools/testing/selftests/pidfd/pidfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_test.c @@ -339,13 +339,9 @@ static int test_pidfd_send_signal_syscall_support(void) ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0); if (ret < 0) { - /* - * pidfd_send_signal() will currently return ENOSYS when - * CONFIG_PROC_FS is not set. 
- */ if (errno == ENOSYS) ksft_exit_skip( - "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n", + "%s test: pidfd_send_signal() syscall not supported\n", test_name); ksft_exit_fail_msg("%s test: Failed to send signal\n", diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c index 4602326b8f5b..a4f4d4cf22c3 100644 --- a/tools/testing/selftests/x86/test_vsyscall.c +++ b/tools/testing/selftests/x86/test_vsyscall.c @@ -451,7 +451,7 @@ static int test_vsys_x(void) printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n", segv_err); } else { - printf("[FAILT]\tExecution failed with the wrong error: #PF(0x%lx)\n", + printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n", segv_err); return 1; } diff --git a/usr/include/Makefile b/usr/include/Makefile index aa316d99e035..1fb6abe29b2f 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -101,10 +101,6 @@ ifeq ($(SRCARCH),riscv) header-test- += linux/bpf_perf_event.h endif -ifeq ($(SRCARCH),s390) -header-test- += asm/zcrypt.h -endif - ifeq ($(SRCARCH),sparc) header-test- += asm/stat.h header-test- += asm/uctx.h diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index f645c0fbf7ec..acc43242a310 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -727,7 +727,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * Ensure we set mode to IN_GUEST_MODE after we disable * interrupts and before the final VCPU requests check. * See the comment in kvm_vcpu_exiting_guest_mode() and - * Documentation/virtual/kvm/vcpu-requests.rst + * Documentation/virt/kvm/vcpu-requests.rst */ smp_store_mb(vcpu->mode, IN_GUEST_MODE); diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 936962abc38d..c45e2d7e942f 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -250,7 +250,7 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu, * pending state of interrupt is latched in pending_latch variable. * Userspace will save and restore pending state and line_level * separately. - * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt + * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.txt * for handling of ISPENDR and ICPENDR. */ for (i = 0; i < len * 8; i++) { diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 57205beaa981..3b7525deec80 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -42,7 +42,7 @@ VGIC_AFFINITY_LEVEL(val, 3)) /* - * As per Documentation/virtual/kvm/devices/arm-vgic-v3.txt, + * As per Documentation/virt/kvm/devices/arm-vgic-v3.txt, * below macros are defined for CPUREG encoding. */ #define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000 @@ -63,7 +63,7 @@ KVM_REG_ARM_VGIC_SYSREG_OP2_MASK) /* - * As per Documentation/virtual/kvm/devices/arm-vgic-its.txt, + * As per Documentation/virt/kvm/devices/arm-vgic-its.txt, * below macros are defined for ITS table entry encoding. */ #define KVM_ITS_CTE_VALID_SHIFT 63 |