Commit 4145cb54 authored by Dave Airlie

Merge tag 'drm-misc-next-2020-07-22' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v5.9:

UAPI Changes:

Cross-subsystem Changes:
- Convert panel-dsi-cm and ingenic bindings to YAML.
- Add lockdep annotations for dma-fence. \o/
- Describe why indefinite fences are a bad idea.
- Update binding for rocktech jh057n00900.

Core Changes:
- Add vblank workers.
- Use spin_(un)lock_irq instead of the irqsave/restore variants in crtc code.
- Add managed vram helpers.
- Convert more logging to drm functions.
- Replace more http links with https in core and drivers.
- Cleanup to ttm iomem functions and implementation.
- Remove TTM CMA memtype as it doesn't work correctly.
- Remove TTM_MEMTYPE_FLAG_MAPPABLE for many drivers that have no
  unmappable memory resources.

Driver Changes:
- Add CRC support to nouveau, using the new vblank workers.
- Dithering and atomic state fix for nouveau.
- Fixes for Frida FRD350H54004 panel.
- Add support for OSD mode (sprite planes), IPU (scaling) and multiple
  panels/bridges to ingenic.
- Use managed vram helpers in ast.
- Assorted small fixes to ingenic, i810, mxsfb.
- Remove optional unused ttm dummy functions.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/d6bf269e-ccb2-8a7b-fdae-226e9e3f8274@linux.intel.com
parents 3ffff3c6 acc0c39a
@@ -165,6 +165,7 @@ examples:
   - |
     #include <dt-bindings/clock/imx8mq-clock.h>
+    #include <dt-bindings/gpio/gpio.h>
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/reset/imx8mq-reset.h>
@@ -191,12 +192,12 @@ examples:
         phy-names = "dphy";

         panel@0 {
-            #address-cells = <1>;
-            #size-cells = <0>;
             compatible = "rocktech,jh057n00900";
             reg = <0>;
-            port@0 {
-                reg = <0>;
+            vcc-supply = <&reg_2v8_p>;
+            iovcc-supply = <&reg_1v8_p>;
+            reset-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+            port {
                 panel_in: endpoint {
                     remote-endpoint = <&mipi_dsi_out>;
                 };
...
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/ingenic,ipu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Ingenic SoCs Image Processing Unit (IPU) devicetree bindings

maintainers:
  - Paul Cercueil <paul@crapouillou.net>

properties:
  compatible:
    oneOf:
      - enum:
          - ingenic,jz4725b-ipu
          - ingenic,jz4760-ipu
      - items:
          - const: ingenic,jz4770-ipu
          - const: ingenic,jz4760-ipu

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    maxItems: 1

  clock-names:
    const: ipu

patternProperties:
  "^ports?$":
    description: OF graph bindings (specified in bindings/graph.txt).

required:
  - compatible
  - reg
  - interrupts
  - clocks
  - clock-names

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/jz4770-cgu.h>
    ipu@13080000 {
      compatible = "ingenic,jz4770-ipu", "ingenic,jz4760-ipu";
      reg = <0x13080000 0x800>;

      interrupt-parent = <&intc>;
      interrupts = <29>;

      clocks = <&cgu JZ4770_CLK_IPU>;
      clock-names = "ipu";

      port {
        ipu_ep: endpoint {
          remote-endpoint = <&lcdc_ep>;
        };
      };
    };
Ingenic JZ47xx LCD driver

Required properties:
- compatible: one of:
  * ingenic,jz4740-lcd
  * ingenic,jz4725b-lcd
  * ingenic,jz4770-lcd
- reg: LCD registers location and length
- clocks: LCD pixclock and device clock specifiers.
  The device clock is only required on the JZ4740.
- clock-names: "lcd_pclk" and "lcd"
- interrupts: Specifies the interrupt line the LCD controller is connected to.

Example:

panel {
    compatible = "sharp,ls020b1dd01d";

    backlight = <&backlight>;
    power-supply = <&vcc>;

    port {
        panel_input: endpoint {
            remote-endpoint = <&panel_output>;
        };
    };
};

lcd: lcd-controller@13050000 {
    compatible = "ingenic,jz4725b-lcd";
    reg = <0x13050000 0x1000>;

    interrupt-parent = <&intc>;
    interrupts = <31>;

    clocks = <&cgu JZ4725B_CLK_LCD>;
    clock-names = "lcd";

    port {
        panel_output: endpoint {
            remote-endpoint = <&panel_input>;
        };
    };
};
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/ingenic,lcd.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Ingenic SoCs LCD controller devicetree bindings

maintainers:
  - Paul Cercueil <paul@crapouillou.net>

properties:
  $nodename:
    pattern: "^lcd-controller@[0-9a-f]+$"

  compatible:
    enum:
      - ingenic,jz4740-lcd
      - ingenic,jz4725b-lcd
      - ingenic,jz4770-lcd
      - ingenic,jz4780-lcd

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    items:
      - description: Pixel clock
      - description: Module clock
    minItems: 1

  clock-names:
    items:
      - const: lcd_pclk
      - const: lcd
    minItems: 1

  port:
    description: OF graph bindings (specified in bindings/graph.txt).

  ports:
    description: OF graph bindings (specified in bindings/graph.txt).
    type: object
    properties:
      port@0:
        type: object
        description: DPI output, to interface with TFT panels.

      port@8:
        type: object
        description:
          Link to the Image Processing Unit (IPU).
          (See ingenic,ipu.yaml).

    required:
      - port@0

required:
  - compatible
  - reg
  - interrupts
  - clocks
  - clock-names

if:
  properties:
    compatible:
      contains:
        enum:
          - ingenic,jz4740-lcd
          - ingenic,jz4780-lcd
then:
  properties:
    clocks:
      minItems: 2
    clock-names:
      minItems: 2
else:
  properties:
    clocks:
      maxItems: 1
    clock-names:
      maxItems: 1

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/jz4740-cgu.h>
    lcd-controller@13050000 {
      compatible = "ingenic,jz4740-lcd";
      reg = <0x13050000 0x1000>;

      interrupt-parent = <&intc>;
      interrupts = <30>;

      clocks = <&cgu JZ4740_CLK_LCD_PCLK>, <&cgu JZ4740_CLK_LCD>;
      clock-names = "lcd_pclk", "lcd";

      port {
        endpoint {
          remote-endpoint = <&panel_input>;
        };
      };
    };
  - |
    #include <dt-bindings/clock/jz4725b-cgu.h>
    lcd-controller@13050000 {
      compatible = "ingenic,jz4725b-lcd";
      reg = <0x13050000 0x1000>;

      interrupt-parent = <&intc>;
      interrupts = <31>;

      clocks = <&cgu JZ4725B_CLK_LCD>;
      clock-names = "lcd_pclk";

      port {
        endpoint {
          remote-endpoint = <&panel_input>;
        };
      };
    };
Generic MIPI DSI Command Mode Panel
===================================

Required properties:
- compatible: "panel-dsi-cm"

Optional properties:
- label: a symbolic name for the panel
- reset-gpios: panel reset gpio
- te-gpios: panel TE gpio

Required nodes:
- Video port for DSI input

Example
-------

lcd0: display {
    compatible = "tpo,taal", "panel-dsi-cm";
    label = "lcd0";

    reset-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;

    port {
        lcd0_in: endpoint {
            remote-endpoint = <&dsi1_out_ep>;
        };
    };
};
# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/panel-dsi-cm.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: DSI command mode panels

maintainers:
  - Tomi Valkeinen <tomi.valkeinen@ti.com>
  - Sebastian Reichel <sre@kernel.org>

description: |
  This binding file is a collection of the DSI panels that
  are usually driven in command mode. If no backlight is
  referenced via the optional backlight property, the DSI
  panel is assumed to have native backlight support.
  The panel may use an OF graph binding for the association
  to the display, or it may be a direct child node of the
  display.

allOf:
  - $ref: panel-common.yaml#

properties:
  compatible:
    items:
      - enum:
          - motorola,droid4-panel # Panel from Motorola Droid4 phone
          - nokia,himalaya # Panel from Nokia N950 phone
          - tpo,taal # Panel from OMAP4 SDP board
      - const: panel-dsi-cm # Generic DSI command mode panel compatible fallback

  reg:
    maxItems: 1
    description: DSI virtual channel

  vddi-supply:
    description:
      Display panels require power to be supplied. While several panels need
      more than one power supply with panel-specific constraints governing the
      order and timings of the power supplies, in many cases a single power
      supply is sufficient, either because the panel has a single power rail, or
      because all its power rails can be driven by the same supply. In that case
      the vddi-supply property specifies the supply powering the panel as a
      phandle to a regulator.

  vpnl-supply:
    description:
      When the display panel needs a second power supply, this property can be
      used in addition to vddi-supply. Both supplies will be enabled at the
      same time before the panel is being accessed.

  width-mm: true
  height-mm: true
  label: true
  rotation: true
  panel-timing: true
  port: true
  reset-gpios: true
  te-gpios: true
  backlight: true

additionalProperties: false

required:
  - compatible
  - reg

examples:
  - |
    #include <dt-bindings/gpio/gpio.h>

    dsi-controller {
      #address-cells = <1>;
      #size-cells = <0>;

      panel@0 {
        compatible = "tpo,taal", "panel-dsi-cm";
        reg = <0>;
        reset-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
      };
    };

...
@@ -24,6 +24,7 @@ properties:
       # Xingbangda XBD599 5.99" 720x1440 TFT LCD panel
       - xingbangda,xbd599

+  port: true
   reg:
     maxItems: 1
     description: DSI virtual channel
...
@@ -133,6 +133,18 @@ DMA Fences
.. kernel-doc:: drivers/dma-buf/dma-fence.c
   :doc: DMA fences overview

+DMA Fence Cross-Driver Contract
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
+   :doc: fence cross-driver contract
+
+DMA Fence Signalling Annotations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
+   :doc: fence signalling annotation
+
DMA Fences Functions Reference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -166,3 +178,73 @@ DMA Fence uABI/Sync File
.. kernel-doc:: include/linux/sync_file.h
   :internal:
Indefinite DMA Fences
~~~~~~~~~~~~~~~~~~~~~
At various times &dma_fence with an indefinite time until dma_fence_wait()
finishes have been proposed. Examples include:
* Future fences, used in HWC1 to signal when a buffer isn't used by the display
any longer, and created with the screen update that makes the buffer visible.
The time this fence completes is entirely under userspace's control.
* Proxy fences, proposed to handle &drm_syncobj for which the fence has not yet
been set. Used to asynchronously delay command submission.
* Userspace fences or gpu futexes, fine-grained locking within a command buffer
that userspace uses for synchronization across engines or with the CPU, which
are then imported as a DMA fence for integration into existing winsys
protocols.
* Long-running compute command buffers, while still using traditional end of
batch DMA fences for memory management instead of context preemption DMA
fences which get reattached when the compute job is rescheduled.
Common to all these schemes is that userspace controls the dependencies of these
fences and controls when they fire. Mixing indefinite fences with normal
in-kernel DMA fences does not work, even when a fallback timeout is included to
protect against malicious userspace:
* Only the kernel knows about all DMA fence dependencies, userspace is not aware
of dependencies injected due to memory management or scheduler decisions.
* Only userspace knows about all dependencies in indefinite fences and when
exactly they will complete, the kernel has no visibility.
Furthermore the kernel has to be able to hold up userspace command submission
for memory management needs, which means we must support indefinite fences being
dependent upon DMA fences. If the kernel were also to support indefinite fences
as in-kernel DMA fences, as any of the above proposals would require, there is
the potential for deadlocks.
.. kernel-render:: DOT
   :alt: Indefinite Fencing Dependency Cycle
   :caption: Indefinite Fencing Dependency Cycle

   digraph "Fencing Cycle" {
      node [shape=box bgcolor=grey style=filled]
      kernel [label="Kernel DMA Fences"]
      userspace [label="userspace controlled fences"]
      kernel -> userspace [label="memory management"]
      userspace -> kernel [label="Future fence, fence proxy, ..."]

      { rank=same; kernel userspace }
   }
This means that the kernel might accidentally create deadlocks through memory
management dependencies which userspace is unaware of, randomly hanging
workloads until the timeout kicks in, even though from userspace's perspective
the workload contains no deadlock. In such a mixed fencing architecture there
is no single entity with knowledge of all dependencies. Therefore preventing
such deadlocks from within the kernel is not possible.

The only solution to avoid dependency loops is by not allowing indefinite
fences in the kernel. This means:
* No future fences, proxy fences or userspace fences imported as DMA fences,
with or without a timeout.
* No DMA fences that signal end of batchbuffer for command submission where
userspace is allowed to use userspace fencing or long running compute
workloads. This also means no implicit fencing for shared buffers in these
cases.
@@ -127,7 +127,7 @@ At least on the EP9315 there is a silicon bug which causes bit 27 of
the VIDSCRNPAGE (framebuffer physical offset) to be tied low. There is
an unofficial errata for this bug at::

-	http://marc.info/?l=linux-arm-kernel&m=110061245502000&w=2
+	https://marc.info/?l=linux-arm-kernel&m=110061245502000&w=2

By default the EP93xx framebuffer driver checks if the allocated physical
address has bit 27 set. If it does, then the memory is freed and an
...
@@ -543,3 +543,18 @@ Vertical Blanking and Interrupt Handling Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_vblank.c
   :export:

+Vertical Blank Work
+===================
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_work.c
+   :doc: vblank works
+
+Vertical Blank Work Functions Reference
+---------------------------------------
+
+.. kernel-doc:: include/drm/drm_vblank_work.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_work.c
+   :export:
@@ -185,7 +185,7 @@ enhancing the kernel code to adapt as a kernel module and also did the
implementation of the user space side [3]. Now (2009) Tiago Vignatti and Dave
Airlie finally put this work in shape and queued to Jesse Barnes' PCI tree.

-0) http://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
-1) http://lists.freedesktop.org/archives/xorg/2005-March/006663.html
-2) http://lists.freedesktop.org/archives/xorg/2005-March/006745.html
-3) http://lists.freedesktop.org/archives/xorg/2007-October/029507.html
+0) https://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
+1) https://lists.freedesktop.org/archives/xorg/2005-March/006663.html
+2) https://lists.freedesktop.org/archives/xorg/2005-March/006745.html
+3) https://lists.freedesktop.org/archives/xorg/2007-October/029507.html
@@ -64,6 +64,52 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
 * &dma_buf.resv pointer.
 */
/**
* DOC: fence cross-driver contract
*
* Since &dma_fence provide a cross driver contract, all drivers must follow the
* same rules:
*
* * Fences must complete in a reasonable time. Fences which represent kernels
* and shaders submitted by userspace, which could run forever, must be backed
* up by timeout and gpu hang recovery code. Minimally that code must prevent
* further command submission and force complete all in-flight fences, e.g.
* when the driver or hardware do not support gpu reset, or if the gpu reset
* failed for some reason. Ideally the driver supports gpu recovery which only
* affects the offending userspace context, and no other userspace
* submissions.
*
* * Drivers may have different ideas of what completion within a reasonable
* time means. Some hang recovery code uses a fixed timeout, others a mix
* between observing forward progress and increasingly strict timeouts.
* Drivers should not try to second guess timeout handling of fences from
* other drivers.
*
* * To ensure there's no deadlocks of dma_fence_wait() against other locks
* drivers should annotate all code required to reach dma_fence_signal(),
* which completes the fences, with dma_fence_begin_signalling() and
* dma_fence_end_signalling().
*
* * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
* This means any code required for fence completion cannot acquire a
* &dma_resv lock. Note that this also pulls in the entire established
* locking hierarchy around dma_resv_lock() and dma_resv_unlock().
*
* * Drivers are allowed to call dma_fence_wait() from their &shrinker
* callbacks. This means any code required for fence completion cannot
* allocate memory with GFP_KERNEL.
*
* * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
* respectively &mmu_interval_notifier callbacks. This means any code required
* for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
* Only GFP_ATOMIC is permissible, which might fail.
*
* Note that only GPU drivers have a reasonable excuse for both requiring
* &mmu_interval_notifier and &shrinker callbacks at the same time as having to
* track asynchronous compute work using &dma_fence. No driver outside of
* drivers/gpu should ever call dma_fence_wait() in such contexts.
*/
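As a concrete illustration of the first rule, a minimal sketch of a driver-side
timeout handler that force-completes a published fence when GPU reset is
unsupported or has failed might look as follows (foo_job and foo_job_timeout
are hypothetical names invented for illustration, not part of this patch):

    #include <linux/dma-fence.h>
    #include <linux/workqueue.h>

    struct foo_job {
        struct dma_fence *fence;    /* fence published to other drivers */
        struct delayed_work tdr;    /* hypothetical timeout handler */
    };

    static void foo_job_timeout(struct work_struct *work)
    {
        struct foo_job *job = container_of(work, struct foo_job, tdr.work);

        /* GPU reset is unsupported or failed: still force-complete the
         * in-flight fence so cross-driver waiters make forward progress. */
        dma_fence_set_error(job->fence, -ETIMEDOUT);
        dma_fence_signal(job->fence);
    }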
static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
    return "stub";
@@ -110,6 +156,160 @@ u64 dma_fence_context_alloc(unsigned num)
}
EXPORT_SYMBOL(dma_fence_context_alloc);
/**
* DOC: fence signalling annotation
*
* Proving correctness of all the kernel code around &dma_fence through code
* review and testing is tricky for a few reasons:
*
* * It is a cross-driver contract, and therefore all drivers must follow the
* same rules for lock nesting order, calling contexts for various functions
* and anything else significant for in-kernel interfaces. But it is also
* impossible to test all drivers in a single machine, hence brute-force N vs.
* N testing of all combinations is impossible. Even just limiting to the
* possible combinations is infeasible.
*
* * There is an enormous amount of driver code involved. For render drivers
* there's the tail of command submission, after fences are published,
* scheduler code, interrupt and workers to process job completion,
* and timeout, gpu reset and gpu hang recovery code. Plus for integration
* with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
* and &shrinker. For modesetting drivers there's the commit tail functions
* between when fences for an atomic modeset are published, and when the
* corresponding vblank completes, including any interrupt processing and
* related workers. Auditing all that code, across all drivers, is not
* feasible.
*
* * Due to how many other subsystems are involved and the locking hierarchies
* this pulls in there is extremely thin wiggle-room for driver-specific
* differences. &dma_fence interacts with almost all of the core memory
* handling through page fault handlers via &dma_resv, dma_resv_lock() and
* dma_resv_unlock(). On the other side it also interacts through all
* allocation sites through &mmu_notifier and &shrinker.
*
* Furthermore lockdep does not handle cross-release dependencies, which means
* any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
* at runtime with some quick testing. The simplest example is one thread
* waiting on a &dma_fence while holding a lock::
*
* lock(A);
* dma_fence_wait(B);
* unlock(A);
*
* while the other thread is stuck trying to acquire the same lock, which
* prevents it from signalling the fence the previous thread is stuck waiting
* on::
*
* lock(A);
* unlock(A);
* dma_fence_signal(B);
*
* By manually annotating all code relevant to signalling a &dma_fence we can
* teach lockdep about these dependencies, which also helps with the validation
* headache since now lockdep can check all the rules for us::
*
* cookie = dma_fence_begin_signalling();
* lock(A);
* unlock(A);
* dma_fence_signal(B);
* dma_fence_end_signalling(cookie);
*
* For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
* annotate critical sections the following rules need to be observed:
*
* * All code necessary to complete a &dma_fence must be annotated, from the
* point where a fence is accessible to other threads, to the point where
* dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
* and due to the very strict rules and many corner cases it is infeasible to
* catch these just with review or normal stress testing.
*
* * &struct dma_resv deserves a special note, since the readers are only
* protected by rcu. This means the signalling critical section starts as soon
* as the new fences are installed, even before dma_resv_unlock() is called.
*
* The only exceptions are fast paths and opportunistic signalling code, which
* call dma_fence_signal() purely as an optimization, but are not required to
* guarantee completion of a &dma_fence. The usual example is a wait IOCTL
* which calls dma_fence_signal(), while the mandatory completion path goes
* through a hardware interrupt and possible job completion worker.
*
* * To aid composability of code, the annotations can be freely nested, as long
* as the overall locking hierarchy is consistent. The annotations also work
* both in interrupt and process context. Due to implementation details this
* requires that callers pass an opaque cookie from
* dma_fence_begin_signalling() to dma_fence_end_signalling().
*
* * Validation against the cross driver contract is implemented by priming
* lockdep with the relevant hierarchy at boot-up. This means even just
* testing with a single device is enough to validate a driver, at least as
* far as deadlocks with dma_fence_wait() against dma_fence_signal() are
* concerned.
*/
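Tying the rules together, a minimal usage sketch for a driver's job-completion
worker might look as follows (foo_job, done_work and foo_job_done_worker are
hypothetical names used only for illustration):

    static void foo_job_done_worker(struct work_struct *w)
    {
        struct foo_job *job = container_of(w, struct foo_job, done_work);
        bool cookie;

        cookie = dma_fence_begin_signalling();
        /* Per the cross-driver contract, nothing in this section may
         * allocate with GFP_KERNEL or acquire a dma_resv lock. */
        dma_fence_signal(job->fence);
        dma_fence_end_signalling(cookie);
    }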
#ifdef CONFIG_LOCKDEP
struct lockdep_map dma_fence_lockdep_map = {
    .name = "dma_fence_map"
};

/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections are annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
    /* explicitly nesting ... */
    if (lock_is_held_type(&dma_fence_lockdep_map, 1))
        return true;

    /* rely on might_sleep check for soft/hardirq locks */
    if (in_atomic())
        return true;

    /* ... and non-recursive readlock */
    lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);

    return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);

/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
    if (cookie)
        return;

    lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);

void __dma_fence_might_wait(void)
{
    bool tmp;

    tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
    if (tmp)
        lock_release(&dma_fence_lockdep_map, _THIS_IP_);
    lock_map_acquire(&dma_fence_lockdep_map);
    lock_map_release(&dma_fence_lockdep_map);
    if (tmp)
        lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
}
#endif
/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
@@ -170,14 +370,19 @@ int dma_fence_signal(struct dma_fence *fence)
{
    unsigned long flags;
    int ret;
+   bool tmp;

    if (!fence)
        return -EINVAL;

+   tmp = dma_fence_begin_signalling();
+
    spin_lock_irqsave(fence->lock, flags);
    ret = dma_fence_signal_locked(fence);
    spin_unlock_irqrestore(fence->lock, flags);

+   dma_fence_end_signalling(tmp);
+
    return ret;
}
EXPORT_SYMBOL(dma_fence_signal);
@@ -210,6 +415,8 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
    might_sleep();

+   __dma_fence_might_wait();
+
    trace_dma_fence_wait_start(fence);
    if (fence->ops->wait)
        ret = fence->ops->wait(fence, intr, timeout);
...
@@ -36,6 +36,7 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
+#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
@@ -116,6 +117,13 @@ static int __init dma_resv_lockdep(void)
    if (ret == -EDEADLK)
        dma_resv_lock_slow(&obj, &ctx);
    fs_reclaim_acquire(GFP_KERNEL);
+#ifdef CONFIG_MMU_NOTIFIER
+   lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+   __dma_fence_might_wait();
+   lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+   __dma_fence_might_wait();
+#endif
    fs_reclaim_release(GFP_KERNEL);
    ww_mutex_unlock(&obj.lock);
    ww_acquire_fini(&ctx);
...
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_cache.o \
        drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
        drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
        drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
-       drm_managed.o
+       drm_managed.o drm_vblank_work.o

drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
...
@@ -94,7 +94,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
        man->func = &amdgpu_gtt_mgr_func;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
-       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
        break;
    case TTM_PL_VRAM:
        /* "On-card" video ram */
@@ -109,7 +109,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
    case AMDGPU_PL_OA:
        /* On-chip GDS memory*/
        man->func = &ttm_bo_manager_func;
-       man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
+       man->flags = TTM_MEMTYPE_FLAG_FIXED;
        man->available_caching = TTM_PL_FLAG_UNCACHED;
        man->default_caching = TTM_PL_FLAG_UNCACHED;
        break;
@@ -837,10 +837,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
    return 0;
}

-static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-}
-
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
{
@@ -1755,7 +1751,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
    .release_notify = &amdgpu_bo_release_notify,
    .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
    .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
-   .io_mem_free = &amdgpu_ttm_io_mem_free,
    .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
    .access_memory = &amdgpu_ttm_access_memory,
    .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
...
@@ -3,7 +3,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

-ast-y := ast_cursor.o ast_drv.o ast_main.o ast_mode.o ast_ttm.o ast_post.o \
+ast-y := ast_cursor.o ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o \
        ast_dp501.o

obj-$(CONFIG_DRM_AST) := ast.o
@@ -110,7 +110,6 @@ struct ast_private {
    uint32_t dram_bus_width;
    uint32_t dram_type;
    uint32_t mclk;
-   uint32_t vram_size;

    int fb_mtrr;
@@ -292,7 +291,6 @@ int ast_mode_config_init(struct ast_private *ast);
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)

int ast_mm_init(struct ast_private *ast);
-void ast_mm_fini(struct ast_private *ast);

/* ast post */
void ast_enable_vga(struct drm_device *dev);
...
@@ -378,38 +378,6 @@ static int ast_get_dram_info(struct drm_device *dev)
    return 0;
}

-static u32 ast_get_vram_info(struct drm_device *dev)
-{
-   struct ast_private *ast = to_ast_private(dev);
-   u8 jreg;
-   u32 vram_size;
-
-   ast_open_key(ast);
-
-   vram_size = AST_VIDMEM_DEFAULT_SIZE;
-   jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
-   switch (jreg & 3) {
-   case 0: vram_size = AST_VIDMEM_SIZE_8M; break;
-   case 1: vram_size = AST_VIDMEM_SIZE_16M; break;
-   case 2: vram_size = AST_VIDMEM_SIZE_32M; break;
-   case 3: vram_size = AST_VIDMEM_SIZE_64M; break;
-   }
-
-   jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
-   switch (jreg & 0x03) {
-   case 1:
-       vram_size -= 0x100000;
-       break;
-   case 2:
-       vram_size -= 0x200000;
-       break;
-   case 3:
-       vram_size -= 0x400000;
-       break;
-   }
-
-   return vram_size;
-}
-
int ast_driver_load(struct drm_device *dev, unsigned long flags)
{
    struct ast_private *ast;
@@ -450,16 +418,14 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
    ast_detect_chip(dev, &need_post);

-   if (need_post)
-       ast_post_gpu(dev);
-
    ret = ast_get_dram_info(dev);
    if (ret)
        goto out_free;
-   ast->vram_size = ast_get_vram_info(dev);
-   drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d size=%08x\n",
-            ast->mclk, ast->dram_type,
-            ast->dram_bus_width, ast->vram_size);
+   drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
+            ast->mclk, ast->dram_type, ast->dram_bus_width);
+
+   if (need_post)
+       ast_post_gpu(dev);

    ret = ast_mm_init(ast);
    if (ret)
@@ -486,6 +452,5 @@ void ast_driver_unload(struct drm_device *dev)
    ast_release_firmware(dev);
    kfree(ast->dp501_fw_addr);

-   ast_mm_fini(ast);
    kfree(ast);
}
@@ -28,22 +28,72 @@

#include <linux/pci.h>

-#include <drm/drm_print.h>
#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>

#include "ast_drv.h"

static u32 ast_get_vram_size(struct ast_private *ast)
{
    u8 jreg;
    u32 vram_size;

    ast_open_key(ast);

    vram_size = AST_VIDMEM_DEFAULT_SIZE;
    jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
    switch (jreg & 3) {
    case 0:
        vram_size = AST_VIDMEM_SIZE_8M;
        break;
    case 1:
        vram_size = AST_VIDMEM_SIZE_16M;
        break;
    case 2:
        vram_size = AST_VIDMEM_SIZE_32M;
        break;
    case 3:
        vram_size = AST_VIDMEM_SIZE_64M;
        break;
    }

    jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
    switch (jreg & 0x03) {
    case 1:
        vram_size -= 0x100000;
        break;
    case 2:
        vram_size -= 0x200000;
        break;
    case 3:
        vram_size -= 0x400000;
        break;
    }

    return vram_size;
}

static void ast_mm_release(struct drm_device *dev, void *ptr)
{
    struct ast_private *ast = to_ast_private(dev);

    arch_phys_wc_del(ast->fb_mtrr);
    arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
                            pci_resource_len(dev->pdev, 0));
}

int ast_mm_init(struct ast_private *ast)
{
-   struct drm_vram_mm *vmm;
+   u32 vram_size;
    int ret;
    struct drm_device *dev = ast->dev;

-   vmm = drm_vram_helper_alloc_mm(
-       dev, pci_resource_start(dev->pdev, 0),
-       ast->vram_size);
-   if (IS_ERR(vmm)) {
-       ret = PTR_ERR(vmm);
+   vram_size = ast_get_vram_size(ast);
+
+   ret = drmm_vram_helper_init(dev, pci_resource_start(dev->pdev, 0),
+                               vram_size);
+   if (ret) {
        drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
        return ret;
    }
@@ -53,16 +103,5 @@ int ast_mm_init(struct ast_private *ast)
    ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                    pci_resource_len(dev->pdev, 0));

-   return 0;
-}
-
-void ast_mm_fini(struct ast_private *ast)
-{
-   struct drm_device *dev = ast->dev;
-
-   drm_vram_helper_release_mm(dev);
-
-   arch_phys_wc_del(ast->fb_mtrr);
-   arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
-                           pci_resource_len(dev->pdev, 0));
+   return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
}
@@ -1844,9 +1844,7 @@ static void connector_bad_edid(struct drm_connector *connector,
    if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
        return;

-   dev_warn(connector->dev->dev,
-            "%s: EDID is invalid:\n",
-            connector->name);
+   drm_warn(connector->dev, "%s: EDID is invalid:\n", connector->name);
    for (i = 0; i < num_blocks; i++) {
        u8 *block = edid + i * EDID_LENGTH;
        char prefix[20];
@@ -5298,7 +5296,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
    }
    if (!drm_edid_is_valid(edid)) {
        clear_eld(connector);
-       dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
+       drm_warn(connector->dev, "%s: EDID invalid.\n",
                 connector->name);
        return 0;
    }
...
@@ -105,7 +105,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
    cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
                                  GFP_KERNEL | __GFP_NOWARN);
    if (!cma_obj->vaddr) {
-       dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
+       drm_dbg(drm, "failed to allocate buffer with size %zu\n",
                size);
        ret = -ENOMEM;
        goto error;
...
@@ -10,6 +10,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
@@ -40,12 +41,11 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 * the frame's scanout buffer or the cursor image. If there's no more space
 * left in VRAM, inactive GEM objects can be moved to system memory.
 *
- * The easiest way to use the VRAM helper library is to call
- * drm_vram_helper_alloc_mm(). The function allocates and initializes an
- * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
- * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
- * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
- * as illustrated below.
+ * To initialize the VRAM helper library call drmm_vram_helper_alloc_mm().
+ * The function allocates and initializes an instance of &struct drm_vram_mm
+ * in &struct drm_device.vram_mm . Use &DRM_GEM_VRAM_DRIVER to initialize
+ * &struct drm_driver and &DRM_VRAM_MM_FILE_OPERATIONS to initialize
+ * &struct file_operations; as illustrated below.
 *
 * .. code-block:: c
 *
@@ -69,7 +69,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 *	// setup device, vram base and size
 *	// ...
 *
- *	ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
+ *	ret = drmm_vram_helper_alloc_mm(dev, vram_base, vram_size);
 *	if (ret)
 *		return ret;
 *	return 0;
@@ -81,20 +81,12 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
 * to userspace.
 *
- * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
- * in the driver's clean-up code.
+ * You don't have to clean up the instance of VRAM MM.
+ * drmm_vram_helper_alloc_mm() is a managed interface that installs a
+ * clean-up handler to run during the DRM device's release.
 *
- * .. code-block:: c
- *
- *	void fini_drm_driver()
- *	{
- *		struct drm_device *dev = ...;
- *
- *		drm_vram_helper_release_mm(dev);
- *	}
- *
- * For drawing or scanout operations, buffer object have to be pinned in video
- * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
+ * For drawing or scanout operations, rsp. buffer objects have to be pinned
+ * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
 * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
 * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
 *
@@ -1017,14 +1009,13 @@ static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
    switch (type) {
    case TTM_PL_SYSTEM:
-       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+       man->flags = 0;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
        break;
    case TTM_PL_VRAM:
        man->func = &ttm_bo_manager_func;
-       man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                    TTM_MEMTYPE_FLAG_MAPPABLE;
+       man->flags = TTM_MEMTYPE_FLAG_FIXED;
        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                 TTM_PL_FLAG_WC;
        man->default_caching = TTM_PL_FLAG_WC;
@@ -1067,12 +1058,8 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo,
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
                                    struct ttm_mem_reg *mem)
{
-   struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
    struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

-   if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-       return -EINVAL;
-
    mem->bus.addr = NULL;
    mem->bus.size = mem->num_pages << PAGE_SHIFT;
@@ -1094,10 +1081,6 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
    return 0;
}
-static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
-                                 struct ttm_mem_reg *mem)
-{ }
-
static struct ttm_bo_driver bo_driver = {
    .ttm_tt_create = bo_driver_ttm_tt_create,
    .ttm_tt_populate = ttm_pool_populate,
@@ -1107,7 +1090,6 @@ static struct ttm_bo_driver bo_driver = {
    .evict_flags = bo_driver_evict_flags,
    .move_notify = bo_driver_move_notify,
    .io_mem_reserve = bo_driver_io_mem_reserve,
-   .io_mem_free = bo_driver_io_mem_free,
};
@@ -1176,17 +1158,7 @@ static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
/*
 * Helpers for integration with struct drm_device
 */

-/**
- * drm_vram_helper_alloc_mm - Allocates a device's instance of \
-	&struct drm_vram_mm
- * @dev:	the DRM device
- * @vram_base:	the base address of the video memory
- * @vram_size:	the size of the video memory in bytes
- *
- * Returns:
- * The new instance of &struct drm_vram_mm on success, or
- * an ERR_PTR()-encoded errno code otherwise.
- */
+/* deprecated; use drmm_vram_mm_init() */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
    struct drm_device *dev, uint64_t vram_base, size_t vram_size)
{
@@ -1212,11 +1184,6 @@ struct drm_vram_mm *drm_vram_helper_alloc_mm(
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);

-/**
- * drm_vram_helper_release_mm - Releases a device's instance of \
-	&struct drm_vram_mm
- * @dev:	the DRM device
- */
void drm_vram_helper_release_mm(struct drm_device *dev)
{
    if (!dev->vram_mm)
@@ -1228,6 +1195,41 @@ void drm_vram_helper_release_mm(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);
static void drm_vram_mm_release(struct drm_device *dev, void *ptr)
{
    drm_vram_helper_release_mm(dev);
}

/**
 * drmm_vram_helper_init - Initializes a device's instance of
 *                         &struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 *
 * Creates a new instance of &struct drm_vram_mm and stores it in
 * struct &drm_device.vram_mm. The instance is auto-managed and cleaned
 * up as part of device cleanup. Calling this function multiple times
 * will generate an error message.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
                          size_t vram_size)
{
    struct drm_vram_mm *vram_mm;

    if (drm_WARN_ON_ONCE(dev, dev->vram_mm))
        return 0;

    vram_mm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
    if (IS_ERR(vram_mm))
        return PTR_ERR(vram_mm);
    return drmm_add_action_or_reset(dev, drm_vram_mm_release, NULL);
}
EXPORT_SYMBOL(drmm_vram_helper_init);
/*
 * Mode-config helpers
 */
...
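A minimal sketch of how a driver would adopt the managed helper above,
assuming a hypothetical foo_load() probe helper (not part of this patch):

    static int foo_load(struct drm_device *dev)
    {
        int ret;

        /* managed: no matching release call is needed */
        ret = drmm_vram_helper_init(dev, pci_resource_start(dev->pdev, 0),
                                    pci_resource_len(dev->pdev, 0));
        if (ret)
            return ret;

        /* dev->vram_mm is now set up and is torn down automatically
         * when the DRM device is released */
        return 0;
    }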
@@ -21,7 +21,10 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

+#include <linux/kthread.h>
+
#include <drm/drm_ioctl.h>
+#include <drm/drm_vblank.h>

#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
@@ -38,6 +41,7 @@ struct drm_master;
struct drm_minor;
struct drm_prime_file_private;
struct drm_printer;
+struct drm_vblank_crtc;

/* drm_file.c */
extern struct mutex drm_global_mutex;
@@ -93,7 +97,30 @@ void drm_minor_release(struct drm_minor *minor);
void drm_managed_release(struct drm_device *dev);

/* drm_vblank.c */
+static inline bool drm_vblank_passed(u64 seq, u64 ref)
+{
+	return (seq - ref) <= (1 << 23);
+}
+
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
+u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
+
+/* drm_vblank_work.c */
+static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank)
+{
+	kthread_flush_worker(vblank->worker);
+}
+
+static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank)
+{
+	kthread_destroy_worker(vblank->worker);
+}
+
+int drm_vblank_worker_init(struct drm_vblank_crtc *vblank);
+void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank);
+void drm_handle_vblank_works(struct drm_vblank_crtc *vblank);

/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
...
@@ -225,9 +225,8 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
        drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
        break;
    default:
-       dev_err_once(fb->dev->dev, "Format is not supported: %s\n",
-                    drm_get_format_name(fb->format->format,
-                                        &format_name));
+       drm_err_once(fb->dev, "Format is not supported: %s\n",
+                    drm_get_format_name(fb->format->format, &format_name));
        return -EINVAL;
    }
@@ -295,7 +294,7 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
                   width * height * 2);
err_msg:
    if (ret)
-       dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+       drm_err_once(fb->dev, "Failed to update display %d\n", ret);

    drm_dev_exit(idx);
}
...
@@ -548,7 +548,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
 * Generalized Timing Formula is derived from:
 *
 *	GTF Spreadsheet by Andy Morrish (1/5/97)
- *	available at http://www.vesa.org
+ *	available at https://www.vesa.org
 *
 * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
 * What I have done is to translate it by using integer calculation.
...
// SPDX-License-Identifier: MIT
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>
#include "drm_internal.h"
/**
* DOC: vblank works
*
* Many DRM drivers need to program hardware in a time-sensitive manner, many
* times with a deadline of starting and finishing within a certain region of
* the scanout. Most of the time the safest way to accomplish this is to
* simply do said time-sensitive programming in the driver's IRQ handler,
* which allows drivers to avoid being preempted during these critical
* regions. Or even better, the hardware may even handle applying such
* time-critical programming independently of the CPU.
*
* While there's a decent amount of hardware that's designed so that the CPU
* doesn't need to be concerned with extremely time-sensitive programming,
* there's a few situations where it can't be helped. Some unforgiving
* hardware may require that certain time-sensitive programming be handled
* completely by the CPU, and said programming may even take too long to
* handle in an IRQ handler. Another such situation would be where the driver
* needs to perform a task that needs to complete within a specific scanout
* period, but might possibly block and thus cannot be handled in an IRQ
* context. Both of these situations can't be solved perfectly in Linux since
* we're not a realtime kernel, and thus the scheduler may cause us to miss
* our deadline if it decides to preempt us. But for some drivers, it's good
* enough if we can lower our chance of being preempted to an absolute
* minimum.
*
* This is where &drm_vblank_work comes in. &drm_vblank_work provides a simple
* generic delayed work implementation which delays work execution until a
* particular vblank has passed, and then executes the work at realtime
* priority. This provides the best possible chance at performing
* time-sensitive hardware programming on time, even when the system is under
* heavy load. &drm_vblank_work also supports rescheduling, so that self
* re-arming work items can be easily implemented.
*/
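A minimal usage sketch of a self-rearming vblank work item, assuming a
hypothetical foo_crtc wrapper (the foo_* names below are invented for
illustration and not part of this patch):

    struct foo_crtc {
        struct drm_crtc base;
        struct drm_vblank_work flip_work;
    };

    static void foo_flip_work(struct kthread_work *base)
    {
        struct drm_vblank_work *work =
            container_of(base, struct drm_vblank_work, base);
        struct foo_crtc *foo = container_of(work, struct foo_crtc, flip_work);

        /* time-sensitive register programming would go here */

        /* self re-arm: run again after the next vblank */
        drm_vblank_work_schedule(work, drm_crtc_vblank_count(&foo->base) + 1,
                                 true);
    }

    /* one-time setup, e.g. from the driver's CRTC init code:
     *
     *     drm_vblank_work_init(&foo->flip_work, &foo->base, foo_flip_work);
     *     drm_vblank_work_schedule(&foo->flip_work,
     *                              drm_crtc_vblank_count(&foo->base) + 1,
     *                              true);
     */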
void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
    struct drm_vblank_work *work, *next;
    u64 count = atomic64_read(&vblank->count);
    bool wake = false;

    assert_spin_locked(&vblank->dev->event_lock);

    list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
        if (!drm_vblank_passed(count, work->count))
            continue;

        list_del_init(&work->node);
        drm_vblank_put(vblank->dev, vblank->pipe);
        kthread_queue_work(vblank->worker, &work->base);
        wake = true;
    }
    if (wake)
        wake_up_all(&vblank->work_wait_queue);
}
/* Handle cancelling any pending vblank work items and drop respective vblank
 * references in response to vblank interrupts being disabled.
 */
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
    struct drm_vblank_work *work, *next;

    assert_spin_locked(&vblank->dev->event_lock);

    list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
        list_del_init(&work->node);
        drm_vblank_put(vblank->dev, vblank->pipe);
    }

    wake_up_all(&vblank->work_wait_queue);
}
/**
* drm_vblank_work_schedule - schedule a vblank work
* @work: vblank work to schedule
* @count: target vblank count
* @nextonmiss: defer until the next vblank if target vblank was missed
*
* Schedule @work for execution once the crtc vblank count reaches @count.
*
* If the crtc vblank count has already reached @count and @nextonmiss is
* %false the work starts to execute immediately.
*
* If the crtc vblank count has already reached @count and @nextonmiss is
* %true the work is deferred until the next vblank (as if @count has been
* specified as crtc vblank count + 1).
*
* If @work is already scheduled, this function will reschedule said work
* using the new @count. This can be used for self-rearming work items.
*
* Returns:
* %1 if @work was successfully (re)scheduled, %0 if it was either already
* scheduled or cancelled, or a negative error code on failure.
*/
int drm_vblank_work_schedule(struct drm_vblank_work *work,
                             u64 count, bool nextonmiss)
{
    struct drm_vblank_crtc *vblank = work->vblank;
    struct drm_device *dev = vblank->dev;
    u64 cur_vbl;
    unsigned long irqflags;
    bool passed, inmodeset, rescheduling = false, wake = false;
    int ret = 0;

    spin_lock_irqsave(&dev->event_lock, irqflags);
    if (work->cancelling)
        goto out;

    spin_lock(&dev->vbl_lock);
    inmodeset = vblank->inmodeset;
    spin_unlock(&dev->vbl_lock);
    if (inmodeset)
        goto out;

    if (list_empty(&work->node)) {
        ret = drm_vblank_get(dev, vblank->pipe);
        if (ret < 0)
            goto out;
    } else if (work->count == count) {
        /* Already scheduled w/ same vbl count */
        goto out;
    } else {
        rescheduling = true;
    }

    work->count = count;
    cur_vbl = drm_vblank_count(dev, vblank->pipe);
    passed = drm_vblank_passed(cur_vbl, count);
    if (passed)
        drm_dbg_core(dev,
                     "crtc %d vblank %llu already passed (current %llu)\n",
                     vblank->pipe, count, cur_vbl);

    if (!nextonmiss && passed) {
        drm_vblank_put(dev, vblank->pipe);
        ret = kthread_queue_work(vblank->worker, &work->base);

        if (rescheduling) {
            list_del_init(&work->node);
            wake = true;
        }
    } else {
        if (!rescheduling)
            list_add_tail(&work->node, &vblank->pending_work);
        ret = true;
    }

out:
    spin_unlock_irqrestore(&dev->event_lock, irqflags);
    if (wake)
        wake_up_all(&vblank->work_wait_queue);
    return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);
/**
* drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
* finish executing
* @work: vblank work to cancel
*
* Cancel an already scheduled vblank work and wait for its
* execution to finish.
*
* On return, @work is guaranteed to no longer be scheduled or running, even
* if it's self-arming.
*
* Returns:
* %True if the work was cancelled before it started to execute, %false
* otherwise.
*/
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
    struct drm_vblank_crtc *vblank = work->vblank;
    struct drm_device *dev = vblank->dev;
    bool ret = false;

    spin_lock_irq(&dev->event_lock);
    if (!list_empty(&work->node)) {
        list_del_init(&work->node);
        drm_vblank_put(vblank->dev, vblank->pipe);
        ret = true;
    }

    work->cancelling++;
    spin_unlock_irq(&dev->event_lock);

    wake_up_all(&vblank->work_wait_queue);

    if (kthread_cancel_work_sync(&work->base))
        ret = true;

    spin_lock_irq(&dev->event_lock);
    work->cancelling--;
    spin_unlock_irq(&dev->event_lock);

    return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);
/**
* drm_vblank_work_flush - wait for a scheduled vblank work to finish
* executing
* @work: vblank work to flush
*
* Wait until @work has finished executing once.
*/
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
struct drm_vblank_crtc *vblank = work->vblank;
struct drm_device *dev = vblank->dev;
spin_lock_irq(&dev->event_lock);
wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
dev->event_lock);
spin_unlock_irq(&dev->event_lock);
kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);

/**
* drm_vblank_work_init - initialize a vblank work item
* @work: vblank work item
* @crtc: CRTC whose vblank will trigger the work execution
* @func: work function to be executed
*
* Initialize a vblank work item for a specific crtc.
*/
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
void (*func)(struct kthread_work *work))
{
kthread_init_work(&work->base, func);
INIT_LIST_HEAD(&work->node);
work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
}
EXPORT_SYMBOL(drm_vblank_work_init);
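
Pulling it together, a hedged sketch of the lifecycle a driver would follow, reusing the hypothetical my_* names from the example above (the per-crtc worker itself is created by drm_vblank_worker_init() below and runs as a real-time SCHED_FIFO kthread):

/* Once, when the crtc-private state is set up: */
drm_vblank_work_init(&ctx->work, crtc, my_vblank_work_fn);

/* Arm it, e.g. from an atomic flush, for the next vblank: */
drm_vblank_work_schedule(&ctx->work,
			 drm_crtc_vblank_count(crtc) + 1, true);

/* On teardown, stop it even if it keeps re-arming itself: */
drm_vblank_work_cancel_sync(&ctx->work);

/* Alternatively, wait for one in-flight execution to finish: */
drm_vblank_work_flush(&ctx->work);
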
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
{
struct sched_param param = {
.sched_priority = MAX_RT_PRIO - 1,
};
struct kthread_worker *worker;
INIT_LIST_HEAD(&vblank->pending_work);
init_waitqueue_head(&vblank->work_wait_queue);
worker = kthread_create_worker(0, "card%d-crtc%d",
vblank->dev->primary->index,
vblank->pipe);
if (IS_ERR(worker))
return PTR_ERR(worker);
vblank->worker = worker;
return sched_setscheduler(vblank->worker->task, SCHED_FIFO, &param);
}
@@ -220,7 +220,7 @@ static int i810_dma_cleanup(struct drm_device *dev)
 	if (dev_priv->ring.virtual_start)
 		drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
 	if (dev_priv->hw_status_page) {
-		pci_free_consistent(dev->pdev, PAGE_SIZE,
+		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
 				    dev_priv->hw_status_page,
 				    dev_priv->dma_status_page);
 	}
@@ -398,8 +398,8 @@ static int i810_dma_initialize(struct drm_device *dev,
 	/* Program Hardware Status Page */
 	dev_priv->hw_status_page =
-		pci_zalloc_consistent(dev->pdev, PAGE_SIZE,
-				      &dev_priv->dma_status_page);
+		dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+				   &dev_priv->dma_status_page, GFP_KERNEL);
 	if (!dev_priv->hw_status_page) {
 		dev->dev_private = (void *)dev_priv;
 		i810_dma_cleanup(dev);
...
@@ -14,3 +14,14 @@ config DRM_INGENIC
 	  Choose this option for DRM support for the Ingenic SoCs.

 	  If M is selected the module will be called ingenic-drm.
+
+if DRM_INGENIC
+
+config DRM_INGENIC_IPU
+	bool "IPU support for Ingenic SoCs"
+	help
+	  Choose this option to enable support for the IPU found in Ingenic SoCs.
+	  The Image Processing Unit (IPU) will appear as a second primary plane.
+
+endif
 obj-$(CONFIG_DRM_INGENIC) += ingenic-drm.o
+ingenic-drm-y = ingenic-drm-drv.o
+ingenic-drm-$(CONFIG_DRM_INGENIC_IPU) += ingenic-ipu.o
/* SPDX-License-Identifier: GPL-2.0 */
//
// Ingenic JZ47xx KMS driver - Register definitions and private API
//
// Copyright (C) 2020, Paul Cercueil <paul@crapouillou.net>
#ifndef DRIVERS_GPU_DRM_INGENIC_INGENIC_DRM_H
#define DRIVERS_GPU_DRM_INGENIC_INGENIC_DRM_H
#include <linux/bitops.h>
#include <linux/types.h>
#define JZ_REG_LCD_CFG 0x00
#define JZ_REG_LCD_VSYNC 0x04
#define JZ_REG_LCD_HSYNC 0x08
#define JZ_REG_LCD_VAT 0x0C
#define JZ_REG_LCD_DAH 0x10
#define JZ_REG_LCD_DAV 0x14
#define JZ_REG_LCD_PS 0x18
#define JZ_REG_LCD_CLS 0x1C
#define JZ_REG_LCD_SPL 0x20
#define JZ_REG_LCD_REV 0x24
#define JZ_REG_LCD_CTRL 0x30
#define JZ_REG_LCD_STATE 0x34
#define JZ_REG_LCD_IID 0x38
#define JZ_REG_LCD_DA0 0x40
#define JZ_REG_LCD_SA0 0x44
#define JZ_REG_LCD_FID0 0x48
#define JZ_REG_LCD_CMD0 0x4C
#define JZ_REG_LCD_DA1 0x50
#define JZ_REG_LCD_SA1 0x54
#define JZ_REG_LCD_FID1 0x58
#define JZ_REG_LCD_CMD1 0x5C
#define JZ_REG_LCD_OSDC 0x100
#define JZ_REG_LCD_OSDCTRL 0x104
#define JZ_REG_LCD_OSDS 0x108
#define JZ_REG_LCD_BGC 0x10c
#define JZ_REG_LCD_KEY0 0x110
#define JZ_REG_LCD_KEY1 0x114
#define JZ_REG_LCD_ALPHA 0x118
#define JZ_REG_LCD_IPUR 0x11c
#define JZ_REG_LCD_XYP0 0x120
#define JZ_REG_LCD_XYP1 0x124
#define JZ_REG_LCD_SIZE0 0x128
#define JZ_REG_LCD_SIZE1 0x12c
#define JZ_LCD_CFG_SLCD BIT(31)
#define JZ_LCD_CFG_PS_DISABLE BIT(23)
#define JZ_LCD_CFG_CLS_DISABLE BIT(22)
#define JZ_LCD_CFG_SPL_DISABLE BIT(21)
#define JZ_LCD_CFG_REV_DISABLE BIT(20)
#define JZ_LCD_CFG_HSYNCM BIT(19)
#define JZ_LCD_CFG_PCLKM BIT(18)
#define JZ_LCD_CFG_INV BIT(17)
#define JZ_LCD_CFG_SYNC_DIR BIT(16)
#define JZ_LCD_CFG_PS_POLARITY BIT(15)
#define JZ_LCD_CFG_CLS_POLARITY BIT(14)
#define JZ_LCD_CFG_SPL_POLARITY BIT(13)
#define JZ_LCD_CFG_REV_POLARITY BIT(12)
#define JZ_LCD_CFG_HSYNC_ACTIVE_LOW BIT(11)
#define JZ_LCD_CFG_PCLK_FALLING_EDGE BIT(10)
#define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9)
#define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8)
#define JZ_LCD_CFG_18_BIT BIT(7)
#define JZ_LCD_CFG_PDW (BIT(5) | BIT(4))
#define JZ_LCD_CFG_MODE_GENERIC_16BIT 0
#define JZ_LCD_CFG_MODE_GENERIC_18BIT BIT(7)
#define JZ_LCD_CFG_MODE_GENERIC_24BIT BIT(6)
#define JZ_LCD_CFG_MODE_SPECIAL_TFT_1 1
#define JZ_LCD_CFG_MODE_SPECIAL_TFT_2 2
#define JZ_LCD_CFG_MODE_SPECIAL_TFT_3 3
#define JZ_LCD_CFG_MODE_TV_OUT_P 4
#define JZ_LCD_CFG_MODE_TV_OUT_I 6
#define JZ_LCD_CFG_MODE_SINGLE_COLOR_STN 8
#define JZ_LCD_CFG_MODE_SINGLE_MONOCHROME_STN 9
#define JZ_LCD_CFG_MODE_DUAL_COLOR_STN 10
#define JZ_LCD_CFG_MODE_DUAL_MONOCHROME_STN 11
#define JZ_LCD_CFG_MODE_8BIT_SERIAL 12
#define JZ_LCD_CFG_MODE_LCM 13
#define JZ_LCD_VSYNC_VPS_OFFSET 16
#define JZ_LCD_VSYNC_VPE_OFFSET 0
#define JZ_LCD_HSYNC_HPS_OFFSET 16
#define JZ_LCD_HSYNC_HPE_OFFSET 0
#define JZ_LCD_VAT_HT_OFFSET 16
#define JZ_LCD_VAT_VT_OFFSET 0
#define JZ_LCD_DAH_HDS_OFFSET 16
#define JZ_LCD_DAH_HDE_OFFSET 0
#define JZ_LCD_DAV_VDS_OFFSET 16
#define JZ_LCD_DAV_VDE_OFFSET 0
#define JZ_LCD_CTRL_BURST_4 (0x0 << 28)
#define JZ_LCD_CTRL_BURST_8 (0x1 << 28)
#define JZ_LCD_CTRL_BURST_16 (0x2 << 28)
#define JZ_LCD_CTRL_RGB555 BIT(27)
#define JZ_LCD_CTRL_OFUP BIT(26)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_16 (0x0 << 24)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_4 (0x1 << 24)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_2 (0x2 << 24)
#define JZ_LCD_CTRL_PDD_MASK (0xff << 16)
#define JZ_LCD_CTRL_EOF_IRQ BIT(13)
#define JZ_LCD_CTRL_SOF_IRQ BIT(12)
#define JZ_LCD_CTRL_OFU_IRQ BIT(11)
#define JZ_LCD_CTRL_IFU0_IRQ BIT(10)
#define JZ_LCD_CTRL_IFU1_IRQ BIT(9)
#define JZ_LCD_CTRL_DD_IRQ BIT(8)
#define JZ_LCD_CTRL_QDD_IRQ BIT(7)
#define JZ_LCD_CTRL_REVERSE_ENDIAN BIT(6)
#define JZ_LCD_CTRL_LSB_FISRT BIT(5)
#define JZ_LCD_CTRL_DISABLE BIT(4)
#define JZ_LCD_CTRL_ENABLE BIT(3)
#define JZ_LCD_CTRL_BPP_1 0x0
#define JZ_LCD_CTRL_BPP_2 0x1
#define JZ_LCD_CTRL_BPP_4 0x2
#define JZ_LCD_CTRL_BPP_8 0x3
#define JZ_LCD_CTRL_BPP_15_16 0x4
#define JZ_LCD_CTRL_BPP_18_24 0x5
#define JZ_LCD_CTRL_BPP_MASK (JZ_LCD_CTRL_RGB555 | 0x7)
#define JZ_LCD_CMD_SOF_IRQ BIT(31)
#define JZ_LCD_CMD_EOF_IRQ BIT(30)
#define JZ_LCD_CMD_ENABLE_PAL BIT(28)
#define JZ_LCD_SYNC_MASK 0x3ff
#define JZ_LCD_STATE_EOF_IRQ BIT(5)
#define JZ_LCD_STATE_SOF_IRQ BIT(4)
#define JZ_LCD_STATE_DISABLED BIT(0)
#define JZ_LCD_OSDC_OSDEN BIT(0)
#define JZ_LCD_OSDC_F0EN BIT(3)
#define JZ_LCD_OSDC_F1EN BIT(4)
#define JZ_LCD_OSDCTRL_IPU BIT(15)
#define JZ_LCD_OSDCTRL_RGB555 BIT(4)
#define JZ_LCD_OSDCTRL_CHANGE BIT(3)
#define JZ_LCD_OSDCTRL_BPP_15_16 0x4
#define JZ_LCD_OSDCTRL_BPP_18_24 0x5
#define JZ_LCD_OSDCTRL_BPP_30 0x7
#define JZ_LCD_OSDCTRL_BPP_MASK (JZ_LCD_OSDCTRL_RGB555 | 0x7)
#define JZ_LCD_OSDS_READY BIT(0)
#define JZ_LCD_IPUR_IPUREN BIT(31)
#define JZ_LCD_IPUR_IPUR_LSB 0
#define JZ_LCD_XYP01_XPOS_LSB 0
#define JZ_LCD_XYP01_YPOS_LSB 16
#define JZ_LCD_SIZE01_WIDTH_LSB 0
#define JZ_LCD_SIZE01_HEIGHT_LSB 16
struct device;
struct drm_plane;
struct drm_plane_state;
struct platform_driver;
void ingenic_drm_plane_config(struct device *dev,
struct drm_plane *plane, u32 fourcc);
void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane);
extern struct platform_driver *ingenic_ipu_driver_ptr;
#endif /* DRIVERS_GPU_DRM_INGENIC_INGENIC_DRM_H */
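
Most of the timing registers above pack two fields into one 32-bit word at the *_OFFSET positions. A minimal sketch of how a driver might compose one of them (the helper name and its use are illustrative, not part of this header):

#include <linux/types.h>

/* Illustrative only: pack vsync pulse start/end into JZ_REG_LCD_VSYNC. */
static inline u32 jz_lcd_vsync(u32 vps, u32 vpe)
{
	return (vps << JZ_LCD_VSYNC_VPS_OFFSET) |
	       (vpe << JZ_LCD_VSYNC_VPE_OFFSET);
}
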
/* SPDX-License-Identifier: GPL-2.0 */
//
// Ingenic JZ47xx IPU - Register definitions and private API
//
// Copyright (C) 2020, Paul Cercueil <paul@crapouillou.net>
#ifndef DRIVERS_GPU_DRM_INGENIC_INGENIC_IPU_H
#define DRIVERS_GPU_DRM_INGENIC_INGENIC_IPU_H
#include <linux/bitops.h>
#define JZ_REG_IPU_CTRL 0x00
#define JZ_REG_IPU_STATUS 0x04
#define JZ_REG_IPU_D_FMT 0x08
#define JZ_REG_IPU_Y_ADDR 0x0c
#define JZ_REG_IPU_U_ADDR 0x10
#define JZ_REG_IPU_V_ADDR 0x14
#define JZ_REG_IPU_IN_GS 0x18
#define JZ_REG_IPU_Y_STRIDE 0x1c
#define JZ_REG_IPU_UV_STRIDE 0x20
#define JZ_REG_IPU_OUT_ADDR 0x24
#define JZ_REG_IPU_OUT_GS 0x28
#define JZ_REG_IPU_OUT_STRIDE 0x2c
#define JZ_REG_IPU_RSZ_COEF_INDEX 0x30
#define JZ_REG_IPU_CSC_C0_COEF 0x34
#define JZ_REG_IPU_CSC_C1_COEF 0x38
#define JZ_REG_IPU_CSC_C2_COEF 0x3c
#define JZ_REG_IPU_CSC_C3_COEF 0x40
#define JZ_REG_IPU_CSC_C4_COEF 0x44
#define JZ_REG_IPU_HRSZ_COEF_LUT 0x48
#define JZ_REG_IPU_VRSZ_COEF_LUT 0x4c
#define JZ_REG_IPU_CSC_OFFSET 0x50
#define JZ_REG_IPU_Y_PHY_T_ADDR 0x54
#define JZ_REG_IPU_U_PHY_T_ADDR 0x58
#define JZ_REG_IPU_V_PHY_T_ADDR 0x5c
#define JZ_REG_IPU_OUT_PHY_T_ADDR 0x60
#define JZ_IPU_CTRL_ADDR_SEL BIT(20)
#define JZ_IPU_CTRL_ZOOM_SEL BIT(18)
#define JZ_IPU_CTRL_DFIX_SEL BIT(17)
#define JZ_IPU_CTRL_LCDC_SEL BIT(11)
#define JZ_IPU_CTRL_SPKG_SEL BIT(10)
#define JZ_IPU_CTRL_VSCALE BIT(9)
#define JZ_IPU_CTRL_HSCALE BIT(8)
#define JZ_IPU_CTRL_STOP BIT(7)
#define JZ_IPU_CTRL_RST BIT(6)
#define JZ_IPU_CTRL_FM_IRQ_EN BIT(5)
#define JZ_IPU_CTRL_CSC_EN BIT(4)
#define JZ_IPU_CTRL_VRSZ_EN BIT(3)
#define JZ_IPU_CTRL_HRSZ_EN BIT(2)
#define JZ_IPU_CTRL_RUN BIT(1)
#define JZ_IPU_CTRL_CHIP_EN BIT(0)
#define JZ_IPU_STATUS_OUT_END BIT(0)
#define JZ_IPU_IN_GS_H_LSB 0x0
#define JZ_IPU_IN_GS_W_LSB 0x10
#define JZ_IPU_OUT_GS_H_LSB 0x0
#define JZ_IPU_OUT_GS_W_LSB 0x10
#define JZ_IPU_Y_STRIDE_Y_LSB 0
#define JZ_IPU_UV_STRIDE_U_LSB 16
#define JZ_IPU_UV_STRIDE_V_LSB 0
#define JZ_IPU_D_FMT_IN_FMT_LSB 0
#define JZ_IPU_D_FMT_IN_FMT_RGB555 (0x0 << JZ_IPU_D_FMT_IN_FMT_LSB)
#define JZ_IPU_D_FMT_IN_FMT_YUV420 (0x0 << JZ_IPU_D_FMT_IN_FMT_LSB)
#define JZ_IPU_D_FMT_IN_FMT_YUV422 (0x1 << JZ_IPU_D_FMT_IN_FMT_LSB)
#define JZ_IPU_D_FMT_IN_FMT_RGB888 (0x2 << JZ_IPU_D_FMT_IN_FMT_LSB)
#define JZ_IPU_D_FMT_IN_FMT_YUV444 (0x2 << JZ_IPU_D_FMT_IN_FMT_LSB)
#define JZ_IPU_D_FMT_IN_FMT_RGB565 (0x3 << JZ_IPU_D_FMT_IN_FMT_LSB)
#define JZ_IPU_D_FMT_YUV_FMT_LSB 2
#define JZ_IPU_D_FMT_YUV_Y1UY0V (0x0 << JZ_IPU_D_FMT_YUV_FMT_LSB)
#define JZ_IPU_D_FMT_YUV_Y1VY0U (0x1 << JZ_IPU_D_FMT_YUV_FMT_LSB)
#define JZ_IPU_D_FMT_YUV_UY1VY0 (0x2 << JZ_IPU_D_FMT_YUV_FMT_LSB)
#define JZ_IPU_D_FMT_YUV_VY1UY0 (0x3 << JZ_IPU_D_FMT_YUV_FMT_LSB)
#define JZ_IPU_D_FMT_IN_FMT_YUV411 (0x3 << JZ_IPU_D_FMT_IN_FMT_LSB)
#define JZ_IPU_D_FMT_OUT_FMT_LSB 19
#define JZ_IPU_D_FMT_OUT_FMT_RGB555 (0x0 << JZ_IPU_D_FMT_OUT_FMT_LSB)
#define JZ_IPU_D_FMT_OUT_FMT_RGB565 (0x1 << JZ_IPU_D_FMT_OUT_FMT_LSB)
#define JZ_IPU_D_FMT_OUT_FMT_RGB888 (0x2 << JZ_IPU_D_FMT_OUT_FMT_LSB)
#define JZ_IPU_D_FMT_OUT_FMT_YUV422 (0x3 << JZ_IPU_D_FMT_OUT_FMT_LSB)
#define JZ_IPU_D_FMT_OUT_FMT_RGBAAA (0x4 << JZ_IPU_D_FMT_OUT_FMT_LSB)
#define JZ_IPU_D_FMT_RGB_OUT_OFT_LSB 22
#define JZ_IPU_D_FMT_RGB_OUT_OFT_RGB (0x0 << JZ_IPU_D_FMT_RGB_OUT_OFT_LSB)
#define JZ_IPU_D_FMT_RGB_OUT_OFT_RBG (0x1 << JZ_IPU_D_FMT_RGB_OUT_OFT_LSB)
#define JZ_IPU_D_FMT_RGB_OUT_OFT_GBR (0x2 << JZ_IPU_D_FMT_RGB_OUT_OFT_LSB)
#define JZ_IPU_D_FMT_RGB_OUT_OFT_GRB (0x3 << JZ_IPU_D_FMT_RGB_OUT_OFT_LSB)
#define JZ_IPU_D_FMT_RGB_OUT_OFT_BRG (0x4 << JZ_IPU_D_FMT_RGB_OUT_OFT_LSB)
#define JZ_IPU_D_FMT_RGB_OUT_OFT_BGR (0x5 << JZ_IPU_D_FMT_RGB_OUT_OFT_LSB)
#define JZ4725B_IPU_RSZ_LUT_COEF_LSB 2
#define JZ4725B_IPU_RSZ_LUT_COEF_MASK 0x7ff
#define JZ4725B_IPU_RSZ_LUT_IN_EN BIT(1)
#define JZ4725B_IPU_RSZ_LUT_OUT_EN BIT(0)
#define JZ4760_IPU_RSZ_COEF20_LSB 6
#define JZ4760_IPU_RSZ_COEF31_LSB 17
#define JZ4760_IPU_RSZ_COEF_MASK 0x7ff
#define JZ4760_IPU_RSZ_OFFSET_LSB 1
#define JZ4760_IPU_RSZ_OFFSET_MASK 0x1f
#define JZ_IPU_CSC_OFFSET_CHROMA_LSB 16
#define JZ_IPU_CSC_OFFSET_LUMA_LSB 0
#endif /* DRIVERS_GPU_DRM_INGENIC_INGENIC_IPU_H */
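
The IPU geometry registers follow the same packing convention as the LCD timing registers: width in the high half, height in the low half. A hedged sketch (helper name illustrative, not part of this header):

#include <linux/types.h>

/* Illustrative only: pack source frame geometry into JZ_REG_IPU_IN_GS. */
static inline u32 jz_ipu_in_gs(u32 width, u32 height)
{
	return (width << JZ_IPU_IN_GS_W_LSB) |
	       (height << JZ_IPU_IN_GS_H_LSB);
}
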
@@ -69,6 +69,11 @@ static const uint32_t mxsfb_formats[] = {
 	DRM_FORMAT_RGB565
 };

+static const uint64_t mxsfb_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
 static struct mxsfb_drm_private *
 drm_pipe_to_mxsfb_drm_private(struct drm_simple_display_pipe *pipe)
 {
@@ -191,7 +196,7 @@ static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
 	.disable_vblank = mxsfb_pipe_disable_vblank,
 };

-static int mxsfb_load(struct drm_device *drm, unsigned long flags)
+static int mxsfb_load(struct drm_device *drm)
 {
 	struct platform_device *pdev = to_platform_device(drm->dev);
 	struct mxsfb_drm_private *mxsfb;
@@ -244,8 +249,8 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
 	}

 	ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
-			mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL,
-			mxsfb->connector);
+			mxsfb_formats, ARRAY_SIZE(mxsfb_formats),
+			mxsfb_modifiers, mxsfb->connector);
 	if (ret < 0) {
 		dev_err(drm->dev, "Cannot setup simple display pipe\n");
 		goto err_vblank;
@@ -398,7 +403,7 @@ static int mxsfb_probe(struct platform_device *pdev)
 	if (IS_ERR(drm))
 		return PTR_ERR(drm);

-	ret = mxsfb_load(drm, 0);
+	ret = mxsfb_load(drm);
 	if (ret)
 		goto err_free;
...
@@ -44,6 +44,9 @@
 #include <subdev/bios/pll.h>
 #include <subdev/clk.h>

+#include <nvif/event.h>
+#include <nvif/cl0046.h>
+
 static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb);
@@ -756,6 +759,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	nvif_notify_fini(&nv_crtc->vblank);
 	kfree(nv_crtc);
 }
@@ -1297,9 +1301,19 @@ create_primary_plane(struct drm_device *dev)
 	return primary;
 }

+static int nv04_crtc_vblank_handler(struct nvif_notify *notify)
+{
+	struct nouveau_crtc *nv_crtc =
+		container_of(notify, struct nouveau_crtc, vblank);
+
+	drm_crtc_handle_vblank(&nv_crtc->base);
+	return NVIF_NOTIFY_KEEP;
+}
+
 int
 nv04_crtc_create(struct drm_device *dev, int crtc_num)
 {
+	struct nouveau_display *disp = nouveau_display(dev);
 	struct nouveau_crtc *nv_crtc;
 	int ret;
@@ -1337,5 +1351,14 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	nv04_cursor_init(nv_crtc);

-	return 0;
+	ret = nvif_notify_init(&disp->disp.object, nv04_crtc_vblank_handler,
+			       false, NV04_DISP_NTFY_VBLANK,
+			       &(struct nvif_notify_head_req_v0) {
+					.head = nv_crtc->index,
+			       },
+			       sizeof(struct nvif_notify_head_req_v0),
+			       sizeof(struct nvif_notify_head_rep_v0),
+			       &nv_crtc->vblank);
+
+	return ret;
 }
@@ -10,6 +10,10 @@ nouveau-y += dispnv50/core917d.o
 nouveau-y += dispnv50/corec37d.o
 nouveau-y += dispnv50/corec57d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o
 nouveau-y += dispnv50/dac507d.o
 nouveau-y += dispnv50/dac907d.o
...
@@ -2,6 +2,9 @@
 #define __NV50_KMS_ATOM_H__
 #define nv50_atom(p) container_of((p), struct nv50_atom, state)
 #include <drm/drm_atomic.h>
+#include "crc.h"
+
+struct nouveau_encoder;

 struct nv50_atom {
 	struct drm_atomic_state state;
@@ -18,6 +21,7 @@ struct nv50_head_atom {
 	struct {
 		u32 mask;
+		u32 owned;
 		u32 olut;
 	} wndw;
@@ -114,9 +118,12 @@ struct nv50_head_atom {
 		u8 nhsync:1;
 		u8 nvsync:1;
 		u8 depth:4;
+		u8 crc_raster:2;
 		u8 bpc;
 	} or;

+	struct nv50_crc_atom crc;
+
 	/* Currently only used for MST */
 	struct {
 		int pbn;
@@ -134,6 +141,7 @@ struct nv50_head_atom {
 			bool ovly:1;
 			bool dither:1;
 			bool procamp:1;
+			bool crc:1;
 			bool or:1;
 		};
 		u16 mask;
@@ -149,6 +157,19 @@ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
 	return nv50_head_atom(statec);
 }

+static inline struct drm_encoder *
+nv50_head_atom_get_encoder(struct nv50_head_atom *atom)
+{
+	struct drm_encoder *encoder = NULL;
+
+	/* We only ever have a single encoder */
+	drm_for_each_encoder_mask(encoder, atom->state.crtc->dev,
+				  atom->state.encoder_mask)
+		break;
+
+	return encoder;
+}
+
 #define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)

 struct nv50_wndw_atom {
...
@@ -2,6 +2,7 @@
 #define __NV50_KMS_CORE_H__
 #include "disp.h"
 #include "atom.h"
+#include "crc.h"
 #include <nouveau_encoder.h>

 struct nv50_core {
@@ -26,6 +27,9 @@ struct nv50_core_func {
 	} wndw;

 	const struct nv50_head_func *head;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	const struct nv50_crc_func *crc;
+#endif
 	const struct nv50_outp_func {
 		void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
 			     struct nv50_head_atom *);
...
@@ -30,6 +30,9 @@ core907d = {
 	.ntfy_wait_done = core507d_ntfy_wait_done,
 	.update = core507d_update,
 	.head = &head907d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	.crc = &crc907d,
+#endif
 	.dac = &dac907d,
 	.sor = &sor907d,
 };
...
@@ -30,6 +30,9 @@ core917d = {
 	.ntfy_wait_done = core507d_ntfy_wait_done,
 	.update = core507d_update,
 	.head = &head917d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	.crc = &crc907d,
+#endif
 	.dac = &dac907d,
 	.sor = &sor907d,
 };
...
@@ -142,6 +142,9 @@ corec37d = {
 	.wndw.owner = corec37d_wndw_owner,
 	.head = &headc37d,
 	.sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	.crc = &crcc37d,
+#endif
 };

 int
...
@@ -52,6 +52,9 @@ corec57d = {
 	.wndw.owner = corec37d_wndw_owner,
 	.head = &headc57d,
 	.sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	.crc = &crcc37d,
+#endif
 };

 int
...
 #ifndef __NV50_KMS_H__
 #define __NV50_KMS_H__
+#include <linux/workqueue.h>
 #include <nvif/mem.h>
 #include "nouveau_display.h"

 struct nv50_msto;
+struct nouveau_encoder;

 struct nv50_disp {
 	struct nvif_disp *disp;
@@ -71,11 +73,33 @@ struct nv50_dmac {
 	struct mutex lock;
 };

+struct nv50_outp_atom {
+	struct list_head head;
+
+	struct drm_encoder *encoder;
+	bool flush_disable;
+
+	union nv50_outp_atom_mask {
+		struct {
+			bool ctrl:1;
+		};
+		u8 mask;
+	} set, clr;
+};
+
 int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 		     const s32 *oclass, u8 head, void *data, u32 size,
 		     u64 syncbuf, struct nv50_dmac *dmac);
 void nv50_dmac_destroy(struct nv50_dmac *);

+/*
+ * For normal encoders this just returns the encoder. For active MST encoders,
+ * this returns the real outp that's driving displays on the topology.
+ * Inactive MST encoders return NULL, since they would have no real outp to
+ * return anyway.
+ */
+struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
+
 u32 *evo_wait(struct nv50_dmac *, int nr);
 void evo_kick(u32 *, struct nv50_dmac *);
...
/* SPDX-License-Identifier: MIT */
#ifndef __NV50_KMS_HANDLES_H__
#define __NV50_KMS_HANDLES_H__
/*
 * Various hard-coded object handles that nouveau uses. These are made up by
 * nouveau developers, not Nvidia. The only significance of the handles chosen
* is that they must all be unique.
*/
#define NV50_DISP_HANDLE_SYNCBUF 0xf0000000
#define NV50_DISP_HANDLE_VRAM 0xf0000001
#define NV50_DISP_HANDLE_WNDW_CTX(kind) (0xfb000000 | kind)
#define NV50_DISP_HANDLE_CRC_CTX(head, i) (0xfc000000 | head->base.index << 1 | i)
#endif /* !__NV50_KMS_HANDLES_H__ */
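
To make the uniqueness argument concrete: the CRC context handles occupy 0xfc000000 upward, two per head, in a different top byte than the window context handles at 0xfb000000 and the fixed syncbuf/VRAM handles. The values below follow directly from the macro above:

/* head->base.index == 0: NV50_DISP_HANDLE_CRC_CTX(head, 0) == 0xfc000000 */
/*                        NV50_DISP_HANDLE_CRC_CTX(head, 1) == 0xfc000001 */
/* head->base.index == 1: NV50_DISP_HANDLE_CRC_CTX(head, 0) == 0xfc000002 */
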
@@ -19,8 +19,15 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
+#include <drm/drm_connector.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_vblank.h>
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_connector.h"
 #include "head.h"
 #include "core.h"
+#include "crc.h"

 void
 head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
@@ -29,9 +36,10 @@ head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
 	u32 *push;

 	if ((push = evo_wait(core, 3))) {
 		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
-		evo_data(push, 0x00000001 | asyh->or.depth << 6 |
-			       asyh->or.nvsync << 4 |
-			       asyh->or.nhsync << 3);
+		evo_data(push, asyh->or.depth << 6 |
+			       asyh->or.nvsync << 4 |
+			       asyh->or.nhsync << 3 |
+			       asyh->or.crc_raster);
 		evo_data(push, 0x31ec6000 | head->base.index << 25 |
 			       asyh->mode.interlace);
 		evo_kick(push, core);
...
@@ -21,6 +21,7 @@
  */
 #include "wndw.h"
 #include "wimm.h"
+#include "handles.h"

 #include <nvif/class.h>
 #include <nvif/cl0002.h>
@@ -59,7 +60,7 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
 	int ret;

 	nouveau_framebuffer_get_layout(fb, &unused, &kind);
-	handle = 0xfb000000 | kind;
+	handle = NV50_DISP_HANDLE_WNDW_CTX(kind);

 	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
 		if (ctxdma->object.handle == handle)
...