Commit cbc8cc04, authored Dec 18, 2009 by Dave Airlie
Merge remote branch 'korg/drm-vmware-staging' into drm-core-next
Parents: 5012f506 fb1d9738
Showing 28 changed files with 11394 additions and 1 deletion (+11394, -1)
drivers/gpu/drm/Makefile                    +1     -0
drivers/gpu/drm/vmwgfx/Kconfig              +13    -0
drivers/gpu/drm/vmwgfx/Makefile             +9     -0
drivers/gpu/drm/vmwgfx/svga3d_reg.h         +1793  -0
drivers/gpu/drm/vmwgfx/svga_escape.h        +89    -0
drivers/gpu/drm/vmwgfx/svga_overlay.h       +201   -0
drivers/gpu/drm/vmwgfx/svga_reg.h           +1346  -0
drivers/gpu/drm/vmwgfx/svga_types.h         +45    -0
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c      +229   -0
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         +735   -0
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         +511   -0
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     +516   -0
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c          +742   -0
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c        +521   -0
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c         +213   -0
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c       +81    -0
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c         +295   -0
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c         +872   -0
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h         +102   -0
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c         +516   -0
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c     +634   -0
drivers/gpu/drm/vmwgfx/vmwgfx_reg.h         +57    -0
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c    +1192  -0
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c    +99    -0
drivers/staging/Kconfig                     +2     -0
include/drm/Kbuild                          +1     -0
include/drm/ttm/ttm_object.h                +5     -1
include/drm/vmwgfx_drm.h                    +574   -0
drivers/gpu/drm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_DRM_I830) += i830/
 obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_SIS) += sis/
 obj-$(CONFIG_DRM_SAVAGE) += savage/
+obj-$(CONFIG_DRM_VMWGFX) += vmwgfx/
 obj-$(CONFIG_DRM_VIA) += via/
 obj-$(CONFIG_DRM_NOUVEAU) += nouveau/
 obj-y += i2c/
drivers/gpu/drm/vmwgfx/Kconfig (new file, mode 100644)
config DRM_VMWGFX
	tristate "DRM driver for VMware Virtual GPU"
	depends on DRM && PCI
	select FB_DEFERRED_IO
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA
	select FB_CFB_IMAGEBLIT
	select DRM_TTM
	help
	  KMS enabled DRM driver for SVGA2 virtual hardware.

	  If unsure say n. The compiled module will be
	  called vmwgfx.ko
drivers/gpu/drm/vmwgfx/Makefile (new file, mode 100644)
ccflags-y := -Iinclude/drm

vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
	    vmwgfx_overlay.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
drivers/gpu/drm/vmwgfx/svga3d_reg.h (new file, mode 100644)
/**********************************************************
* Copyright 1998-2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
/*
* svga3d_reg.h --
*
* SVGA 3D hardware definitions
*/
#ifndef _SVGA3D_REG_H_
#define _SVGA3D_REG_H_
#include "svga_reg.h"
/*
* 3D Hardware Version
*
* The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
* register. Is set by the host and read by the guest. This lets
* us make new guest drivers which are backwards-compatible with old
* SVGA hardware revisions. It does not let us support old guest
* drivers. Good enough for now.
*
*/
#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
typedef enum {
   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS65_B1,
} SVGA3dHardwareVersion;
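/*
 * Editor's illustration (not part of this commit): because the major
 * number lives in the high 16 bits, whole hwversion values compare in
 * version order, so a guest driver might gate 3D setup as sketched
 * below.  The helper name is hypothetical.
 */
static inline int svga3d_hwversion_is_supported(uint32 hwversion)
{
   /* Reject hosts older than the revision this header targets,
    * and hosts that moved to an incompatible major revision. */
   return hwversion >= SVGA3D_HWVERSION_CURRENT &&
          SVGA3D_MAJOR_HWVERSION(hwversion) ==
          SVGA3D_MAJOR_HWVERSION(SVGA3D_HWVERSION_CURRENT);
}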
/*
* Generic Types
*/
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_NUM_CLIPPLANES 6
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
/*
* Surface formats.
*
* If you modify this list, be sure to keep GLUtil.c in sync. It
* includes the internal format definition of each surface in
* GLUtil_ConvertSurfaceFormat, and it contains a table of
* human-readable names in GLUtil_GetFormatName.
*/
typedef enum SVGA3dSurfaceFormat {
   SVGA3D_FORMAT_INVALID  = 0,

   SVGA3D_X8R8G8B8        = 1,
   SVGA3D_A8R8G8B8        = 2,

   SVGA3D_R5G6B5          = 3,
   SVGA3D_X1R5G5B5        = 4,
   SVGA3D_A1R5G5B5        = 5,
   SVGA3D_A4R4G4B4        = 6,

   SVGA3D_Z_D32           = 7,
   SVGA3D_Z_D16           = 8,
   SVGA3D_Z_D24S8         = 9,
   SVGA3D_Z_D15S1         = 10,

   SVGA3D_LUMINANCE8        = 11,
   SVGA3D_LUMINANCE4_ALPHA4 = 12,
   SVGA3D_LUMINANCE16       = 13,
   SVGA3D_LUMINANCE8_ALPHA8 = 14,

   SVGA3D_DXT1            = 15,
   SVGA3D_DXT2            = 16,
   SVGA3D_DXT3            = 17,
   SVGA3D_DXT4            = 18,
   SVGA3D_DXT5            = 19,

   SVGA3D_BUMPU8V8        = 20,
   SVGA3D_BUMPL6V5U5      = 21,
   SVGA3D_BUMPX8L8V8U8    = 22,
   SVGA3D_BUMPL8V8U8      = 23,

   SVGA3D_ARGB_S10E5      = 24,   /* 16-bit floating-point ARGB */
   SVGA3D_ARGB_S23E8      = 25,   /* 32-bit floating-point ARGB */

   SVGA3D_A2R10G10B10     = 26,

   /* signed formats */
   SVGA3D_V8U8            = 27,
   SVGA3D_Q8W8V8U8        = 28,
   SVGA3D_CxV8U8          = 29,

   /* mixed formats */
   SVGA3D_X8L8V8U8        = 30,
   SVGA3D_A2W10V10U10     = 31,

   SVGA3D_ALPHA8          = 32,

   /* Single- and dual-component floating point formats */
   SVGA3D_R_S10E5         = 33,
   SVGA3D_R_S23E8         = 34,
   SVGA3D_RG_S10E5        = 35,
   SVGA3D_RG_S23E8        = 36,

   /*
    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
    * the most efficient format to use when creating new surfaces
    * expressly for index or vertex data.
    */
   SVGA3D_BUFFER          = 37,

   SVGA3D_Z_D24X8         = 38,

   SVGA3D_V16U16          = 39,

   SVGA3D_G16R16          = 40,
   SVGA3D_A16B16G16R16    = 41,

   /* Packed Video formats */
   SVGA3D_UYVY            = 42,
   SVGA3D_YUY2            = 43,

   SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
/*
* These match the D3DFORMAT_OP definitions used by Direct3D. We need
* them so that we can query the host for what the supported surface
* operations are (when we're using the D3D backend, in particular),
* and so we can send those operations to the guest.
*/
typedef enum {
   SVGA3DFORMAT_OP_TEXTURE                             = 0x00000001,
   SVGA3DFORMAT_OP_VOLUMETEXTURE                       = 0x00000002,
   SVGA3DFORMAT_OP_CUBETEXTURE                         = 0x00000004,
   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET              = 0x00000008,
   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET            = 0x00000010,
   SVGA3DFORMAT_OP_ZSTENCIL                            = 0x00000040,
   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,

   /*
    * This format can be used as a render target if the current display mode
    * is the same depth, as long as the alpha channel is ignored. e.g. if the
    * device can render to A8R8G8B8 when the display mode is X8R8G8B8, then
    * the format op list entry for A8R8G8B8 should have this cap.
    */
   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,

   /*
    * This format contains DirectDraw support (including Flip). This flag
    * should not be set on alpha formats.
    */
   SVGA3DFORMAT_OP_DISPLAYMODE                         = 0x00000400,

   /*
    * The rasterizer can support some level of Direct3D support in this format
    * and implies that the driver can create a Context in this mode (for some
    * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
    * flag must also be set.
    */
   SVGA3DFORMAT_OP_3DACCELERATION                      = 0x00000800,

   /*
    * This is set for a private format when the driver has put the bpp in
    * the structure.
    */
   SVGA3DFORMAT_OP_PIXELSIZE                           = 0x00001000,

   /*
    * Indicates that this format can be converted to any RGB format for which
    * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
    */
   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                     = 0x00002000,

   /*
    * Indicates that this format can be used to create offscreen plain surfaces.
    */
   SVGA3DFORMAT_OP_OFFSCREENPLAIN                      = 0x00004000,

   /*
    * Indicates that this format can be read as an SRGB texture (meaning that
    * the sampler will linearize the looked up data).
    */
   SVGA3DFORMAT_OP_SRGBREAD                            = 0x00008000,

   /*
    * Indicates that this format can be used in the bumpmap instructions.
    */
   SVGA3DFORMAT_OP_BUMPMAP                             = 0x00010000,

   /*
    * Indicates that this format can be sampled by the displacement map sampler.
    */
   SVGA3DFORMAT_OP_DMAP                                = 0x00020000,

   /*
    * Indicates that this format cannot be used with texture filtering.
    */
   SVGA3DFORMAT_OP_NOFILTER                            = 0x00040000,

   /*
    * Indicates that format conversions are supported to this RGB format if
    * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
    */
   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                  = 0x00080000,

   /*
    * Indicates that this format can be written as an SRGB target (meaning
    * that the pixel pipe will DE-linearize data on output to format).
    */
   SVGA3DFORMAT_OP_SRGBWRITE                           = 0x00100000,

   /*
    * Indicates that this format cannot be used with alpha blending.
    */
   SVGA3DFORMAT_OP_NOALPHABLEND                        = 0x00200000,

   /*
    * Indicates that the device can auto-generate sublevels for resources
    * of this format.
    */
   SVGA3DFORMAT_OP_AUTOGENMIPMAP                       = 0x00400000,

   /*
    * Indicates that this format can be used by the vertex texture sampler.
    */
   SVGA3DFORMAT_OP_VERTEXTEXTURE                       = 0x00800000,

   /*
    * Indicates that this format supports neither texture coordinate wrap
    * modes, nor mipmapping.
    */
   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                = 0x01000000
} SVGA3dFormatOp;
/*
* This structure is a conversion of SVGA3DFORMAT_OP_*.
* Entries must be located at the same position.
*/
typedef union {
   uint32 value;
   struct {
      uint32 texture : 1;
      uint32 volumeTexture : 1;
      uint32 cubeTexture : 1;
      uint32 offscreenRenderTarget : 1;
      uint32 sameFormatRenderTarget : 1;
      uint32 unknown1 : 1;
      uint32 zStencil : 1;
      uint32 zStencilArbitraryDepth : 1;
      uint32 sameFormatUpToAlpha : 1;
      uint32 unknown2 : 1;
      uint32 displayMode : 1;
      uint32 acceleration3d : 1;
      uint32 pixelSize : 1;
      uint32 convertToARGB : 1;
      uint32 offscreenPlain : 1;
      uint32 sRGBRead : 1;
      uint32 bumpMap : 1;
      uint32 dmap : 1;
      uint32 noFilter : 1;
      uint32 memberOfGroupARGB : 1;
      uint32 sRGBWrite : 1;
      uint32 noAlphaBlend : 1;
      uint32 autoGenMipMap : 1;
      uint32 vertexTexture : 1;
      uint32 noTexCoordWrapNorMip : 1;
   };
} SVGA3dSurfaceFormatCaps;
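/*
 * Editor's illustration (not part of this commit): because the bitfield
 * above mirrors the SVGA3DFORMAT_OP_* values bit for bit, a raw ops mask
 * can be viewed through the union.  'opsMask' and the helper name are
 * hypothetical.
 */
static inline int svga3d_format_is_filterable_texture(uint32 opsMask)
{
   SVGA3dSurfaceFormatCaps caps;

   caps.value = opsMask;               /* reinterpret the mask */
   return caps.texture && !caps.noFilter;
}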
/*
* SVGA_3D_CMD_SETRENDERSTATE Types. All value types
* must fit in a uint32.
*/
typedef enum {
   SVGA3D_RS_INVALID                  = 0,
   SVGA3D_RS_ZENABLE                  = 1,     /* SVGA3dBool */
   SVGA3D_RS_ZWRITEENABLE             = 2,     /* SVGA3dBool */
   SVGA3D_RS_ALPHATESTENABLE          = 3,     /* SVGA3dBool */
   SVGA3D_RS_DITHERENABLE             = 4,     /* SVGA3dBool */
   SVGA3D_RS_BLENDENABLE              = 5,     /* SVGA3dBool */
   SVGA3D_RS_FOGENABLE                = 6,     /* SVGA3dBool */
   SVGA3D_RS_SPECULARENABLE           = 7,     /* SVGA3dBool */
   SVGA3D_RS_STENCILENABLE            = 8,     /* SVGA3dBool */
   SVGA3D_RS_LIGHTINGENABLE           = 9,     /* SVGA3dBool */
   SVGA3D_RS_NORMALIZENORMALS         = 10,    /* SVGA3dBool */
   SVGA3D_RS_POINTSPRITEENABLE        = 11,    /* SVGA3dBool */
   SVGA3D_RS_POINTSCALEENABLE         = 12,    /* SVGA3dBool */
   SVGA3D_RS_STENCILREF               = 13,    /* uint32 */
   SVGA3D_RS_STENCILMASK              = 14,    /* uint32 */
   SVGA3D_RS_STENCILWRITEMASK         = 15,    /* uint32 */
   SVGA3D_RS_FOGSTART                 = 16,    /* float */
   SVGA3D_RS_FOGEND                   = 17,    /* float */
   SVGA3D_RS_FOGDENSITY               = 18,    /* float */
   SVGA3D_RS_POINTSIZE                = 19,    /* float */
   SVGA3D_RS_POINTSIZEMIN             = 20,    /* float */
   SVGA3D_RS_POINTSIZEMAX             = 21,    /* float */
   SVGA3D_RS_POINTSCALE_A             = 22,    /* float */
   SVGA3D_RS_POINTSCALE_B             = 23,    /* float */
   SVGA3D_RS_POINTSCALE_C             = 24,    /* float */
   SVGA3D_RS_FOGCOLOR                 = 25,    /* SVGA3dColor */
   SVGA3D_RS_AMBIENT                  = 26,    /* SVGA3dColor */
   SVGA3D_RS_CLIPPLANEENABLE          = 27,    /* SVGA3dClipPlanes */
   SVGA3D_RS_FOGMODE                  = 28,    /* SVGA3dFogMode */
   SVGA3D_RS_FILLMODE                 = 29,    /* SVGA3dFillMode */
   SVGA3D_RS_SHADEMODE                = 30,    /* SVGA3dShadeMode */
   SVGA3D_RS_LINEPATTERN              = 31,    /* SVGA3dLinePattern */
   SVGA3D_RS_SRCBLEND                 = 32,    /* SVGA3dBlendOp */
   SVGA3D_RS_DSTBLEND                 = 33,    /* SVGA3dBlendOp */
   SVGA3D_RS_BLENDEQUATION            = 34,    /* SVGA3dBlendEquation */
   SVGA3D_RS_CULLMODE                 = 35,    /* SVGA3dFace */
   SVGA3D_RS_ZFUNC                    = 36,    /* SVGA3dCmpFunc */
   SVGA3D_RS_ALPHAFUNC                = 37,    /* SVGA3dCmpFunc */
   SVGA3D_RS_STENCILFUNC              = 38,    /* SVGA3dCmpFunc */
   SVGA3D_RS_STENCILFAIL              = 39,    /* SVGA3dStencilOp */
   SVGA3D_RS_STENCILZFAIL             = 40,    /* SVGA3dStencilOp */
   SVGA3D_RS_STENCILPASS              = 41,    /* SVGA3dStencilOp */
   SVGA3D_RS_ALPHAREF                 = 42,    /* float (0.0 .. 1.0) */
   SVGA3D_RS_FRONTWINDING             = 43,    /* SVGA3dFrontWinding */
   SVGA3D_RS_COORDINATETYPE           = 44,    /* SVGA3dCoordinateType */
   SVGA3D_RS_ZBIAS                    = 45,    /* float */
   SVGA3D_RS_RANGEFOGENABLE           = 46,    /* SVGA3dBool */
   SVGA3D_RS_COLORWRITEENABLE         = 47,    /* SVGA3dColorMask */
   SVGA3D_RS_VERTEXMATERIALENABLE     = 48,    /* SVGA3dBool */
   SVGA3D_RS_DIFFUSEMATERIALSOURCE    = 49,    /* SVGA3dVertexMaterial */
   SVGA3D_RS_SPECULARMATERIALSOURCE   = 50,    /* SVGA3dVertexMaterial */
   SVGA3D_RS_AMBIENTMATERIALSOURCE    = 51,    /* SVGA3dVertexMaterial */
   SVGA3D_RS_EMISSIVEMATERIALSOURCE   = 52,    /* SVGA3dVertexMaterial */
   SVGA3D_RS_TEXTUREFACTOR            = 53,    /* SVGA3dColor */
   SVGA3D_RS_LOCALVIEWER              = 54,    /* SVGA3dBool */
   SVGA3D_RS_SCISSORTESTENABLE        = 55,    /* SVGA3dBool */
   SVGA3D_RS_BLENDCOLOR               = 56,    /* SVGA3dColor */
   SVGA3D_RS_STENCILENABLE2SIDED      = 57,    /* SVGA3dBool */
   SVGA3D_RS_CCWSTENCILFUNC           = 58,    /* SVGA3dCmpFunc */
   SVGA3D_RS_CCWSTENCILFAIL           = 59,    /* SVGA3dStencilOp */
   SVGA3D_RS_CCWSTENCILZFAIL          = 60,    /* SVGA3dStencilOp */
   SVGA3D_RS_CCWSTENCILPASS           = 61,    /* SVGA3dStencilOp */
   SVGA3D_RS_VERTEXBLEND              = 62,    /* SVGA3dVertexBlendFlags */
   SVGA3D_RS_SLOPESCALEDEPTHBIAS      = 63,    /* float */
   SVGA3D_RS_DEPTHBIAS                = 64,    /* float */

   /*
    * Output Gamma Level
    *
    * Output gamma affects the gamma curve of colors that are output from the
    * rendering pipeline. A value of 1.0 specifies a linear color space. If the
    * value is <= 0.0, gamma correction is ignored and linear color space is
    * used.
    */
   SVGA3D_RS_OUTPUTGAMMA              = 65,    /* float */
   SVGA3D_RS_ZVISIBLE                 = 66,    /* SVGA3dBool */
   SVGA3D_RS_LASTPIXEL                = 67,    /* SVGA3dBool */
   SVGA3D_RS_CLIPPING                 = 68,    /* SVGA3dBool */
   SVGA3D_RS_WRAP0                    = 69,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP1                    = 70,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP2                    = 71,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP3                    = 72,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP4                    = 73,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP5                    = 74,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP6                    = 75,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP7                    = 76,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP8                    = 77,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP9                    = 78,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP10                   = 79,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP11                   = 80,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP12                   = 81,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP13                   = 82,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP14                   = 83,    /* SVGA3dWrapFlags */
   SVGA3D_RS_WRAP15                   = 84,    /* SVGA3dWrapFlags */
   SVGA3D_RS_MULTISAMPLEANTIALIAS     = 85,    /* SVGA3dBool */
   SVGA3D_RS_MULTISAMPLEMASK          = 86,    /* uint32 */
   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87,    /* SVGA3dBool */
   SVGA3D_RS_TWEENFACTOR              = 88,    /* float */
   SVGA3D_RS_ANTIALIASEDLINEENABLE    = 89,    /* SVGA3dBool */
   SVGA3D_RS_COLORWRITEENABLE1        = 90,    /* SVGA3dColorMask */
   SVGA3D_RS_COLORWRITEENABLE2        = 91,    /* SVGA3dColorMask */
   SVGA3D_RS_COLORWRITEENABLE3        = 92,    /* SVGA3dColorMask */
   SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93,    /* SVGA3dBool */
   SVGA3D_RS_SRCBLENDALPHA            = 94,    /* SVGA3dBlendOp */
   SVGA3D_RS_DSTBLENDALPHA            = 95,    /* SVGA3dBlendOp */
   SVGA3D_RS_BLENDEQUATIONALPHA       = 96,    /* SVGA3dBlendEquation */
   SVGA3D_RS_MAX
} SVGA3dRenderStateName;
typedef enum {
   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
} SVGA3dVertexMaterial;

typedef enum {
   SVGA3D_FILLMODE_INVALID = 0,
   SVGA3D_FILLMODE_POINT   = 1,
   SVGA3D_FILLMODE_LINE    = 2,
   SVGA3D_FILLMODE_FILL    = 3,
   SVGA3D_FILLMODE_MAX
} SVGA3dFillModeType;

typedef union {
   struct {
      uint16 mode;       /* SVGA3dFillModeType */
      uint16 face;       /* SVGA3dFace */
   };
   uint32 uintValue;
} SVGA3dFillMode;

typedef enum {
   SVGA3D_SHADEMODE_INVALID = 0,
   SVGA3D_SHADEMODE_FLAT    = 1,
   SVGA3D_SHADEMODE_SMOOTH  = 2,
   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
   SVGA3D_SHADEMODE_MAX
} SVGA3dShadeMode;

typedef union {
   struct {
      uint16 repeat;
      uint16 pattern;
   };
   uint32 uintValue;
} SVGA3dLinePattern;
typedef enum {
   SVGA3D_BLENDOP_INVALID        = 0,
   SVGA3D_BLENDOP_ZERO           = 1,
   SVGA3D_BLENDOP_ONE            = 2,
   SVGA3D_BLENDOP_SRCCOLOR       = 3,
   SVGA3D_BLENDOP_INVSRCCOLOR    = 4,
   SVGA3D_BLENDOP_SRCALPHA       = 5,
   SVGA3D_BLENDOP_INVSRCALPHA    = 6,
   SVGA3D_BLENDOP_DESTALPHA      = 7,
   SVGA3D_BLENDOP_INVDESTALPHA   = 8,
   SVGA3D_BLENDOP_DESTCOLOR      = 9,
   SVGA3D_BLENDOP_INVDESTCOLOR   = 10,
   SVGA3D_BLENDOP_SRCALPHASAT    = 11,
   SVGA3D_BLENDOP_BLENDFACTOR    = 12,
   SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
   SVGA3D_BLENDOP_MAX
} SVGA3dBlendOp;

typedef enum {
   SVGA3D_BLENDEQ_INVALID     = 0,
   SVGA3D_BLENDEQ_ADD         = 1,
   SVGA3D_BLENDEQ_SUBTRACT    = 2,
   SVGA3D_BLENDEQ_REVSUBTRACT = 3,
   SVGA3D_BLENDEQ_MINIMUM     = 4,
   SVGA3D_BLENDEQ_MAXIMUM     = 5,
   SVGA3D_BLENDEQ_MAX
} SVGA3dBlendEquation;

typedef enum {
   SVGA3D_FRONTWINDING_INVALID = 0,
   SVGA3D_FRONTWINDING_CW      = 1,
   SVGA3D_FRONTWINDING_CCW     = 2,
   SVGA3D_FRONTWINDING_MAX
} SVGA3dFrontWinding;

typedef enum {
   SVGA3D_FACE_INVALID    = 0,
   SVGA3D_FACE_NONE       = 1,
   SVGA3D_FACE_FRONT      = 2,
   SVGA3D_FACE_BACK       = 3,
   SVGA3D_FACE_FRONT_BACK = 4,
   SVGA3D_FACE_MAX
} SVGA3dFace;
/*
* The order and the values should not be changed
*/
typedef enum {
   SVGA3D_CMP_INVALID      = 0,
   SVGA3D_CMP_NEVER        = 1,
   SVGA3D_CMP_LESS         = 2,
   SVGA3D_CMP_EQUAL        = 3,
   SVGA3D_CMP_LESSEQUAL    = 4,
   SVGA3D_CMP_GREATER      = 5,
   SVGA3D_CMP_NOTEQUAL     = 6,
   SVGA3D_CMP_GREATEREQUAL = 7,
   SVGA3D_CMP_ALWAYS       = 8,
   SVGA3D_CMP_MAX
} SVGA3dCmpFunc;
/*
* SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
* the fog factor to be specified in the alpha component of the specular
* (a.k.a. secondary) vertex color.
*/
typedef enum {
   SVGA3D_FOGFUNC_INVALID    = 0,
   SVGA3D_FOGFUNC_EXP        = 1,
   SVGA3D_FOGFUNC_EXP2       = 2,
   SVGA3D_FOGFUNC_LINEAR     = 3,
   SVGA3D_FOGFUNC_PER_VERTEX = 4
} SVGA3dFogFunction;
/*
* SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
* or per-pixel basis.
*/
typedef enum {
   SVGA3D_FOGTYPE_INVALID = 0,
   SVGA3D_FOGTYPE_VERTEX  = 1,
   SVGA3D_FOGTYPE_PIXEL   = 2,
   SVGA3D_FOGTYPE_MAX     = 3
} SVGA3dFogType;
/*
* SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
* computed using the eye Z value of each pixel (or vertex), whereas range-
* based fog is computed using the actual distance (range) to the eye.
*/
typedef enum {
   SVGA3D_FOGBASE_INVALID    = 0,
   SVGA3D_FOGBASE_DEPTHBASED = 1,
   SVGA3D_FOGBASE_RANGEBASED = 2,
   SVGA3D_FOGBASE_MAX        = 3
} SVGA3dFogBase;

typedef enum {
   SVGA3D_STENCILOP_INVALID = 0,
   SVGA3D_STENCILOP_KEEP    = 1,
   SVGA3D_STENCILOP_ZERO    = 2,
   SVGA3D_STENCILOP_REPLACE = 3,
   SVGA3D_STENCILOP_INCRSAT = 4,
   SVGA3D_STENCILOP_DECRSAT = 5,
   SVGA3D_STENCILOP_INVERT  = 6,
   SVGA3D_STENCILOP_INCR    = 7,
   SVGA3D_STENCILOP_DECR    = 8,
   SVGA3D_STENCILOP_MAX
} SVGA3dStencilOp;

typedef enum {
   SVGA3D_CLIPPLANE_0 = (1 << 0),
   SVGA3D_CLIPPLANE_1 = (1 << 1),
   SVGA3D_CLIPPLANE_2 = (1 << 2),
   SVGA3D_CLIPPLANE_3 = (1 << 3),
   SVGA3D_CLIPPLANE_4 = (1 << 4),
   SVGA3D_CLIPPLANE_5 = (1 << 5),
} SVGA3dClipPlanes;

typedef enum {
   SVGA3D_CLEAR_COLOR   = 0x1,
   SVGA3D_CLEAR_DEPTH   = 0x2,
   SVGA3D_CLEAR_STENCIL = 0x4
} SVGA3dClearFlag;

typedef enum {
   SVGA3D_RT_DEPTH   = 0,
   SVGA3D_RT_STENCIL = 1,
   SVGA3D_RT_COLOR0  = 2,
   SVGA3D_RT_COLOR1  = 3,
   SVGA3D_RT_COLOR2  = 4,
   SVGA3D_RT_COLOR3  = 5,
   SVGA3D_RT_COLOR4  = 6,
   SVGA3D_RT_COLOR5  = 7,
   SVGA3D_RT_COLOR6  = 8,
   SVGA3D_RT_COLOR7  = 9,
   SVGA3D_RT_MAX,
   SVGA3D_RT_INVALID = ((uint32)-1),
} SVGA3dRenderTargetType;
#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
typedef union {
   struct {
      uint32 red : 1;
      uint32 green : 1;
      uint32 blue : 1;
      uint32 alpha : 1;
   };
   uint32 uintValue;
} SVGA3dColorMask;

typedef enum {
   SVGA3D_VBLEND_DISABLE = 0,
   SVGA3D_VBLEND_1WEIGHT = 1,
   SVGA3D_VBLEND_2WEIGHT = 2,
   SVGA3D_VBLEND_3WEIGHT = 3,
} SVGA3dVertexBlendFlags;

typedef enum {
   SVGA3D_WRAPCOORD_0   = 1 << 0,
   SVGA3D_WRAPCOORD_1   = 1 << 1,
   SVGA3D_WRAPCOORD_2   = 1 << 2,
   SVGA3D_WRAPCOORD_3   = 1 << 3,
   SVGA3D_WRAPCOORD_ALL = 0xF,
} SVGA3dWrapFlags;
/*
* SVGA_3D_CMD_TEXTURESTATE Types. All value types
* must fit in a uint32.
*/
typedef enum {
   SVGA3D_TS_INVALID                   = 0,
   SVGA3D_TS_BIND_TEXTURE              = 1,    /* SVGA3dSurfaceId */
   SVGA3D_TS_COLOROP                   = 2,    /* SVGA3dTextureCombiner */
   SVGA3D_TS_COLORARG1                 = 3,    /* SVGA3dTextureArgData */
   SVGA3D_TS_COLORARG2                 = 4,    /* SVGA3dTextureArgData */
   SVGA3D_TS_ALPHAOP                   = 5,    /* SVGA3dTextureCombiner */
   SVGA3D_TS_ALPHAARG1                 = 6,    /* SVGA3dTextureArgData */
   SVGA3D_TS_ALPHAARG2                 = 7,    /* SVGA3dTextureArgData */
   SVGA3D_TS_ADDRESSU                  = 8,    /* SVGA3dTextureAddress */
   SVGA3D_TS_ADDRESSV                  = 9,    /* SVGA3dTextureAddress */
   SVGA3D_TS_MIPFILTER                 = 10,   /* SVGA3dTextureFilter */
   SVGA3D_TS_MAGFILTER                 = 11,   /* SVGA3dTextureFilter */
   SVGA3D_TS_MINFILTER                 = 12,   /* SVGA3dTextureFilter */
   SVGA3D_TS_BORDERCOLOR               = 13,   /* SVGA3dColor */
   SVGA3D_TS_TEXCOORDINDEX             = 14,   /* uint32 */
   SVGA3D_TS_TEXTURETRANSFORMFLAGS     = 15,   /* SVGA3dTexTransformFlags */
   SVGA3D_TS_TEXCOORDGEN               = 16,   /* SVGA3dTextureCoordGen */
   SVGA3D_TS_BUMPENVMAT00              = 17,   /* float */
   SVGA3D_TS_BUMPENVMAT01              = 18,   /* float */
   SVGA3D_TS_BUMPENVMAT10              = 19,   /* float */
   SVGA3D_TS_BUMPENVMAT11              = 20,   /* float */
   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL      = 21,   /* uint32 */
   SVGA3D_TS_TEXTURE_LOD_BIAS          = 22,   /* float */
   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23,   /* uint32 */
   SVGA3D_TS_ADDRESSW                  = 24,   /* SVGA3dTextureAddress */

   /*
    * Sampler Gamma Level
    *
    * Sampler gamma affects the color of samples taken from the sampler. A
    * value of 1.0 will produce linear samples. If the value is <= 0.0 the
    * gamma value is ignored and a linear space is used.
    */
   SVGA3D_TS_GAMMA                     = 25,   /* float */
   SVGA3D_TS_BUMPENVLSCALE             = 26,   /* float */
   SVGA3D_TS_BUMPENVLOFFSET            = 27,   /* float */
   SVGA3D_TS_COLORARG0                 = 28,   /* SVGA3dTextureArgData */
   SVGA3D_TS_ALPHAARG0                 = 29,   /* SVGA3dTextureArgData */
   SVGA3D_TS_MAX
} SVGA3dTextureStateName;

typedef enum {
   SVGA3D_TC_INVALID                   = 0,
   SVGA3D_TC_DISABLE                   = 1,
   SVGA3D_TC_SELECTARG1                = 2,
   SVGA3D_TC_SELECTARG2                = 3,
   SVGA3D_TC_MODULATE                  = 4,
   SVGA3D_TC_ADD                       = 5,
   SVGA3D_TC_ADDSIGNED                 = 6,
   SVGA3D_TC_SUBTRACT                  = 7,
   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
   SVGA3D_TC_BLENDFACTORALPHA          = 11,
   SVGA3D_TC_MODULATE2X                = 12,
   SVGA3D_TC_MODULATE4X                = 13,
   SVGA3D_TC_DSDT                      = 14,
   SVGA3D_TC_DOTPRODUCT3               = 15,
   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
   SVGA3D_TC_ADDSIGNED2X               = 17,
   SVGA3D_TC_ADDSMOOTH                 = 18,
   SVGA3D_TC_PREMODULATE               = 19,
   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
   SVGA3D_TC_MULTIPLYADD               = 25,
   SVGA3D_TC_LERP                      = 26,
   SVGA3D_TC_MAX
} SVGA3dTextureCombiner;
#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
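/*
 * Editor's illustration (not part of this commit): combiner N maps to
 * capability bit N-1 (SVGA3D_TC_INVALID maps to no bit), so a host
 * combiner-capability mask can be tested like this.  'capsMask' and
 * the helper name are hypothetical.
 */
static inline int svga3d_tc_is_supported(uint32 capsMask,
                                         SVGA3dTextureCombiner op)
{
   return (capsMask & SVGA3D_TC_CAP_BIT(op)) != 0;
}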
typedef enum {
   SVGA3D_TEX_ADDRESS_INVALID    = 0,
   SVGA3D_TEX_ADDRESS_WRAP       = 1,
   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
   SVGA3D_TEX_ADDRESS_BORDER     = 4,
   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
   SVGA3D_TEX_ADDRESS_EDGE       = 6,
   SVGA3D_TEX_ADDRESS_MAX
} SVGA3dTextureAddress;
/*
* SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
* disabled, and the rasterizer should use the magnification filter instead.
*/
typedef enum {
   SVGA3D_TEX_FILTER_NONE          = 0,
   SVGA3D_TEX_FILTER_NEAREST       = 1,
   SVGA3D_TEX_FILTER_LINEAR        = 2,
   SVGA3D_TEX_FILTER_ANISOTROPIC   = 3,
   SVGA3D_TEX_FILTER_FLATCUBIC     = 4, // Deprecated, not implemented
   SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
   SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
   SVGA3D_TEX_FILTER_GAUSSIANQUAD  = 7, // Not currently implemented
   SVGA3D_TEX_FILTER_MAX
} SVGA3dTextureFilter;

typedef enum {
   SVGA3D_TEX_TRANSFORM_OFF = 0,
   SVGA3D_TEX_TRANSFORM_S   = (1 << 0),
   SVGA3D_TEX_TRANSFORM_T   = (1 << 1),
   SVGA3D_TEX_TRANSFORM_R   = (1 << 2),
   SVGA3D_TEX_TRANSFORM_Q   = (1 << 3),
   SVGA3D_TEX_PROJECTED     = (1 << 15),
} SVGA3dTexTransformFlags;

typedef enum {
   SVGA3D_TEXCOORD_GEN_OFF              = 0,
   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
   SVGA3D_TEXCOORD_GEN_MAX
} SVGA3dTextureCoordGen;
/*
* Texture argument constants for texture combiner
*/
typedef enum {
   SVGA3D_TA_INVALID  = 0,
   SVGA3D_TA_CONSTANT = 1,
   SVGA3D_TA_PREVIOUS = 2,
   SVGA3D_TA_DIFFUSE  = 3,
   SVGA3D_TA_TEXTURE  = 4,
   SVGA3D_TA_SPECULAR = 5,
   SVGA3D_TA_MAX
} SVGA3dTextureArgData;
#define SVGA3D_TM_MASK_LEN 4
/* Modifiers for texture argument constants defined above. */
typedef enum {
   SVGA3D_TM_NONE      = 0,
   SVGA3D_TM_ALPHA     = (1 << SVGA3D_TM_MASK_LEN),
   SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
} SVGA3dTextureArgModifier;
#define SVGA3D_INVALID_ID ((uint32)-1)
#define SVGA3D_MAX_CLIP_PLANES 6
/*
* This is the limit to the number of fixed-function texture
* transforms and texture coordinates we can support. It does *not*
* correspond to the number of texture image units (samplers) we
* support!
*/
#define SVGA3D_MAX_TEXTURE_COORDS 8
/*
* Vertex declarations
*
* Notes:
*
* SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
* draw with any POSITIONT vertex arrays, the programmable vertex
* pipeline will be implicitly disabled. Drawing will take place as if
* no vertex shader was bound.
*/
typedef enum {
   SVGA3D_DECLUSAGE_POSITION = 0,
   SVGA3D_DECLUSAGE_BLENDWEIGHT,       //  1
   SVGA3D_DECLUSAGE_BLENDINDICES,      //  2
   SVGA3D_DECLUSAGE_NORMAL,            //  3
   SVGA3D_DECLUSAGE_PSIZE,             //  4
   SVGA3D_DECLUSAGE_TEXCOORD,          //  5
   SVGA3D_DECLUSAGE_TANGENT,           //  6
   SVGA3D_DECLUSAGE_BINORMAL,          //  7
   SVGA3D_DECLUSAGE_TESSFACTOR,        //  8
   SVGA3D_DECLUSAGE_POSITIONT,         //  9
   SVGA3D_DECLUSAGE_COLOR,             // 10
   SVGA3D_DECLUSAGE_FOG,               // 11
   SVGA3D_DECLUSAGE_DEPTH,             // 12
   SVGA3D_DECLUSAGE_SAMPLE,            // 13
   SVGA3D_DECLUSAGE_MAX
} SVGA3dDeclUsage;

typedef enum {
   SVGA3D_DECLMETHOD_DEFAULT = 0,
   SVGA3D_DECLMETHOD_PARTIALU,
   SVGA3D_DECLMETHOD_PARTIALV,
   SVGA3D_DECLMETHOD_CROSSUV,          // Normal
   SVGA3D_DECLMETHOD_UV,
   SVGA3D_DECLMETHOD_LOOKUP,           // Lookup a displacement map
   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
} SVGA3dDeclMethod;

typedef enum {
   SVGA3D_DECLTYPE_FLOAT1    = 0,
   SVGA3D_DECLTYPE_FLOAT2    = 1,
   SVGA3D_DECLTYPE_FLOAT3    = 2,
   SVGA3D_DECLTYPE_FLOAT4    = 3,
   SVGA3D_DECLTYPE_D3DCOLOR  = 4,
   SVGA3D_DECLTYPE_UBYTE4    = 5,
   SVGA3D_DECLTYPE_SHORT2    = 6,
   SVGA3D_DECLTYPE_SHORT4    = 7,
   SVGA3D_DECLTYPE_UBYTE4N   = 8,
   SVGA3D_DECLTYPE_SHORT2N   = 9,
   SVGA3D_DECLTYPE_SHORT4N   = 10,
   SVGA3D_DECLTYPE_USHORT2N  = 11,
   SVGA3D_DECLTYPE_USHORT4N  = 12,
   SVGA3D_DECLTYPE_UDEC3     = 13,
   SVGA3D_DECLTYPE_DEC3N     = 14,
   SVGA3D_DECLTYPE_FLOAT16_2 = 15,
   SVGA3D_DECLTYPE_FLOAT16_4 = 16,
   SVGA3D_DECLTYPE_MAX,
} SVGA3dDeclType;
/*
* This structure is used for the divisor for geometry instancing;
* it's a direct translation of the Direct3D equivalent.
*/
typedef union {
   struct {
      /*
       * For index data, this number represents the number of instances to draw.
       * For instance data, this number represents the number of
       * instances/vertex in this stream.
       */
      uint32 count : 30;

      /*
       * This is 1 if this is supposed to be the data that is repeated for
       * every instance.
       */
      uint32 indexedData : 1;

      /*
       * This is 1 if this is supposed to be the per-instance data.
       */
      uint32 instanceData : 1;
   };
   uint32 value;
} SVGA3dVertexDivisor;
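/*
 * Editor's illustration (not part of this commit): per the comments
 * above, indexed (geometry) streams carry the instance count while
 * per-instance streams advance once per instance.  The helper and its
 * parameters are hypothetical.
 */
static inline void svga3d_setup_divisors(SVGA3dVertexDivisor *geomDiv,
                                         SVGA3dVertexDivisor *instDiv,
                                         uint32 numInstances)
{
   geomDiv->value = 0;
   geomDiv->count = numInstances;      /* instances to draw */
   geomDiv->indexedData = 1;           /* repeated for every instance */

   instDiv->value = 0;
   instDiv->count = 1;                 /* one element per instance */
   instDiv->instanceData = 1;          /* per-instance data stream */
}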
typedef enum {
   SVGA3D_PRIMITIVE_INVALID       = 0,
   SVGA3D_PRIMITIVE_TRIANGLELIST  = 1,
   SVGA3D_PRIMITIVE_POINTLIST     = 2,
   SVGA3D_PRIMITIVE_LINELIST      = 3,
   SVGA3D_PRIMITIVE_LINESTRIP     = 4,
   SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
   SVGA3D_PRIMITIVE_TRIANGLEFAN   = 6,
   SVGA3D_PRIMITIVE_MAX
} SVGA3dPrimitiveType;

typedef enum {
   SVGA3D_COORDINATE_INVALID     = 0,
   SVGA3D_COORDINATE_LEFTHANDED  = 1,
   SVGA3D_COORDINATE_RIGHTHANDED = 2,
   SVGA3D_COORDINATE_MAX
} SVGA3dCoordinateType;

typedef enum {
   SVGA3D_TRANSFORM_INVALID    = 0,
   SVGA3D_TRANSFORM_WORLD      = 1,
   SVGA3D_TRANSFORM_VIEW       = 2,
   SVGA3D_TRANSFORM_PROJECTION = 3,
   SVGA3D_TRANSFORM_TEXTURE0   = 4,
   SVGA3D_TRANSFORM_TEXTURE1   = 5,
   SVGA3D_TRANSFORM_TEXTURE2   = 6,
   SVGA3D_TRANSFORM_TEXTURE3   = 7,
   SVGA3D_TRANSFORM_TEXTURE4   = 8,
   SVGA3D_TRANSFORM_TEXTURE5   = 9,
   SVGA3D_TRANSFORM_TEXTURE6   = 10,
   SVGA3D_TRANSFORM_TEXTURE7   = 11,
   SVGA3D_TRANSFORM_WORLD1     = 12,
   SVGA3D_TRANSFORM_WORLD2     = 13,
   SVGA3D_TRANSFORM_WORLD3     = 14,
   SVGA3D_TRANSFORM_MAX
} SVGA3dTransformType;
typedef enum {
   SVGA3D_LIGHTTYPE_INVALID     = 0,
   SVGA3D_LIGHTTYPE_POINT       = 1,
   SVGA3D_LIGHTTYPE_SPOT1       = 2,    /* 1-cone, in degrees */
   SVGA3D_LIGHTTYPE_SPOT2       = 3,    /* 2-cone, in radians */
   SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
   SVGA3D_LIGHTTYPE_MAX
} SVGA3dLightType;

typedef enum {
   SVGA3D_CUBEFACE_POSX = 0,
   SVGA3D_CUBEFACE_NEGX = 1,
   SVGA3D_CUBEFACE_POSY = 2,
   SVGA3D_CUBEFACE_NEGY = 3,
   SVGA3D_CUBEFACE_POSZ = 4,
   SVGA3D_CUBEFACE_NEGZ = 5,
} SVGA3dCubeFace;

typedef enum {
   SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
   SVGA3D_SHADERTYPE_VS           = 1,
   SVGA3D_SHADERTYPE_PS           = 2,
   SVGA3D_SHADERTYPE_MAX
} SVGA3dShaderType;

typedef enum {
   SVGA3D_CONST_TYPE_FLOAT = 0,
   SVGA3D_CONST_TYPE_INT   = 1,
   SVGA3D_CONST_TYPE_BOOL  = 2,
} SVGA3dShaderConstType;

#define SVGA3D_MAX_SURFACE_FACES 6

typedef enum {
   SVGA3D_STRETCH_BLT_POINT  = 0,
   SVGA3D_STRETCH_BLT_LINEAR = 1,
   SVGA3D_STRETCH_BLT_MAX
} SVGA3dStretchBltMode;

typedef enum {
   SVGA3D_QUERYTYPE_OCCLUSION = 0,
   SVGA3D_QUERYTYPE_MAX
} SVGA3dQueryType;

typedef enum {
   SVGA3D_QUERYSTATE_PENDING   = 0,   /* Waiting on the host (set by guest) */
   SVGA3D_QUERYSTATE_SUCCEEDED = 1,   /* Completed successfully (set by host) */
   SVGA3D_QUERYSTATE_FAILED    = 2,   /* Completed unsuccessfully (set by host) */
   SVGA3D_QUERYSTATE_NEW       = 3,   /* Never submitted (For guest use only) */
} SVGA3dQueryState;

typedef enum {
   SVGA3D_WRITE_HOST_VRAM = 1,
   SVGA3D_READ_HOST_VRAM  = 2,
} SVGA3dTransferType;
/*
 * The maximum number of vertex arrays we're guaranteed to support in
 * SVGA_3D_CMD_DRAWPRIMITIVES.
 */
#define SVGA3D_MAX_VERTEX_ARRAYS 32
/*
* Identifiers for commands in the command FIFO.
*
* IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
* the SVGA3D protocol and remain reserved; they should not be used in the
* future.
*
* IDs between 1040 and 1999 (inclusive) are available for use by the
* current SVGA3D protocol.
*
* FIFO clients other than SVGA3D should stay below 1000, or at 2000
* and up.
*/
#define SVGA_3D_CMD_LEGACY_BASE 1000
#define SVGA_3D_CMD_BASE 1040
#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
#define SVGA_3D_CMD_FUTURE_MAX 2000
/*
* Common substructures used in multiple FIFO commands:
*/
typedef struct {
   union {
      struct {
         uint16 function;  // SVGA3dFogFunction
         uint8 type;       // SVGA3dFogType
         uint8 base;       // SVGA3dFogBase
      };
      uint32 uintValue;
   };
} SVGA3dFogMode;
/*
* Uniquely identify one image (a 1D/2D/3D array) from a surface. This
* is a surface ID as well as face/mipmap indices.
*/
typedef struct SVGA3dSurfaceImageId {
   uint32 sid;
   uint32 face;
   uint32 mipmap;
} SVGA3dSurfaceImageId;

typedef struct SVGA3dGuestImage {
   SVGAGuestPtr ptr;

   /*
    * A note on interpretation of pitch: This value of pitch is the
    * number of bytes between vertically adjacent image
    * blocks. Normally this is the number of bytes between the first
    * pixel of two adjacent scanlines. With compressed textures,
    * however, this may represent the number of bytes between
    * compression blocks rather than between rows of pixels.
    *
    * XXX: Compressed textures currently must be tightly packed in guest memory.
    *
    * If the image is 1-dimensional, pitch is ignored.
    *
    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
    * assuming each row of blocks is tightly packed.
    */
   uint32 pitch;
} SVGA3dGuestImage;
/*
* FIFO command format definitions:
*/
/*
* The data size header following cmdNum for every 3d command
*/
typedef struct {
   uint32 id;
   uint32 size;
} SVGA3dCmdHeader;
/*
* A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
* optional mipmaps and cube faces.
*/
typedef struct {
   uint32 width;
   uint32 height;
   uint32 depth;
} SVGA3dSize;

typedef enum {
   SVGA3D_SURFACE_CUBEMAP           = (1 << 0),
   SVGA3D_SURFACE_HINT_STATIC       = (1 << 1),
   SVGA3D_SURFACE_HINT_DYNAMIC      = (1 << 2),
   SVGA3D_SURFACE_HINT_INDEXBUFFER  = (1 << 3),
   SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
   SVGA3D_SURFACE_HINT_TEXTURE      = (1 << 5),
   SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
   SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
   SVGA3D_SURFACE_HINT_WRITEONLY    = (1 << 8),
} SVGA3dSurfaceFlags;

typedef struct {
   uint32 numMipLevels;
} SVGA3dSurfaceFace;
typedef struct {
   uint32 sid;
   SVGA3dSurfaceFlags surfaceFlags;
   SVGA3dSurfaceFormat format;
   SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
   /*
    * Followed by an SVGA3dSize structure for each mip level in each face.
    *
    * A note on surface sizes: Sizes are always specified in pixels,
    * even if the true surface size is not a multiple of the minimum
    * block size of the surface's format. For example, a 3x3x1 DXT1
    * compressed texture would actually be stored as a 4x4x1 image in
    * memory.
    */
} SVGA3dCmdDefineSurface;             /* SVGA_3D_CMD_SURFACE_DEFINE */
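/*
 * Editor's illustration (not part of this commit): a complete
 * SURFACE_DEFINE command as it sits in the FIFO is the generic
 * SVGA3dCmdHeader, the fixed body above, then one SVGA3dSize per mip
 * level.  The layout below assumes a single-face, single-mip 2D
 * texture; the struct and helper names are hypothetical, and how the
 * bytes reach the FIFO is driver code not shown here.
 */
struct hypothetical_define_surface_cmd {
   SVGA3dCmdHeader header;
   SVGA3dCmdDefineSurface body;
   SVGA3dSize mipSize;                 /* face[0], mip level 0 */
};

static inline void
hypothetical_init_define_surface(struct hypothetical_define_surface_cmd *cmd,
                                 uint32 sid, uint32 width, uint32 height)
{
   cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
   cmd->header.size = sizeof(*cmd) - sizeof(cmd->header);
   cmd->body.sid = sid;
   cmd->body.surfaceFlags = SVGA3D_SURFACE_HINT_TEXTURE;
   cmd->body.format = SVGA3D_A8R8G8B8;
   cmd->body.face[0].numMipLevels = 1; /* faces 1..5 assumed zeroed by caller */
   cmd->mipSize.width = width;
   cmd->mipSize.height = height;
   cmd->mipSize.depth = 1;
}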
typedef struct {
   uint32 sid;
} SVGA3dCmdDestroySurface;            /* SVGA_3D_CMD_SURFACE_DESTROY */

typedef struct {
   uint32 cid;
} SVGA3dCmdDefineContext;             /* SVGA_3D_CMD_CONTEXT_DEFINE */

typedef struct {
   uint32 cid;
} SVGA3dCmdDestroyContext;            /* SVGA_3D_CMD_CONTEXT_DESTROY */

typedef struct {
   uint32 cid;
   SVGA3dClearFlag clearFlag;
   uint32 color;
   float depth;
   uint32 stencil;
   /* Followed by variable number of SVGA3dRect structures */
} SVGA3dCmdClear;                     /* SVGA_3D_CMD_CLEAR */
typedef struct SVGA3dCopyRect {
   uint32 x;
   uint32 y;
   uint32 w;
   uint32 h;
   uint32 srcx;
   uint32 srcy;
} SVGA3dCopyRect;

typedef struct SVGA3dCopyBox {
   uint32 x;
   uint32 y;
   uint32 z;
   uint32 w;
   uint32 h;
   uint32 d;
   uint32 srcx;
   uint32 srcy;
   uint32 srcz;
} SVGA3dCopyBox;

typedef struct {
   uint32 x;
   uint32 y;
   uint32 w;
   uint32 h;
} SVGA3dRect;

typedef struct {
   uint32 x;
   uint32 y;
   uint32 z;
   uint32 w;
   uint32 h;
   uint32 d;
} SVGA3dBox;

typedef struct {
   uint32 x;
   uint32 y;
   uint32 z;
} SVGA3dPoint;
typedef struct {
   SVGA3dLightType type;
   SVGA3dBool inWorldSpace;
   float diffuse[4];
   float specular[4];
   float ambient[4];
   float position[4];
   float direction[4];
   float range;
   float falloff;
   float attenuation0;
   float attenuation1;
   float attenuation2;
   float theta;
   float phi;
} SVGA3dLightData;
typedef struct {
   uint32 sid;
   /* Followed by variable number of SVGA3dCopyRect structures */
} SVGA3dCmdPresent;                   /* SVGA_3D_CMD_PRESENT */

typedef struct {
   SVGA3dRenderStateName state;
   union {
      uint32 uintValue;
      float floatValue;
   };
} SVGA3dRenderState;

typedef struct {
   uint32 cid;
   /* Followed by variable number of SVGA3dRenderState structures */
} SVGA3dCmdSetRenderState;            /* SVGA_3D_CMD_SETRENDERSTATE */

typedef struct {
   uint32 cid;
   SVGA3dRenderTargetType type;
   SVGA3dSurfaceImageId target;
} SVGA3dCmdSetRenderTarget;           /* SVGA_3D_CMD_SETRENDERTARGET */

typedef struct {
   SVGA3dSurfaceImageId src;
   SVGA3dSurfaceImageId dest;
   /* Followed by variable number of SVGA3dCopyBox structures */
} SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */

typedef struct {
   SVGA3dSurfaceImageId src;
   SVGA3dSurfaceImageId dest;
   SVGA3dBox boxSrc;
   SVGA3dBox boxDest;
   SVGA3dStretchBltMode mode;
} SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
typedef struct {
   /*
    * If the discard flag is present in a surface DMA operation, the host may
    * discard the contents of the current mipmap level and face of the target
    * surface before applying the surface DMA contents.
    */
   uint32 discard : 1;

   /*
    * If the unsynchronized flag is present, the host may perform this upload
    * without syncing to pending reads on this surface.
    */
   uint32 unsynchronized : 1;

   /*
    * Guests *MUST* set the reserved bits to 0 before submitting the command
    * suffix as future flags may occupy these bits.
    */
   uint32 reserved : 30;
} SVGA3dSurfaceDMAFlags;

typedef struct {
   SVGA3dGuestImage guest;
   SVGA3dSurfaceImageId host;
   SVGA3dTransferType transfer;
   /*
    * Followed by variable number of SVGA3dCopyBox structures. For consistency
    * in all clipping logic and coordinate translation, we define the
    * "source" in each copyBox as the guest image and the
    * "destination" as the host image, regardless of transfer
    * direction.
    *
    * For efficiency, the SVGA3D device is free to copy more data than
    * specified. For example, it may round copy boxes outwards such
    * that they lie on particular alignment boundaries.
    */
} SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
/*
* SVGA3dCmdSurfaceDMASuffix --
*
* This is a command suffix that will appear after a SurfaceDMA command in
* the FIFO. It contains some extra information that hosts may use to
* optimize performance or protect the guest. This suffix exists to preserve
* backwards compatibility while also allowing for new functionality to be
* implemented.
*/
typedef struct {
   uint32 suffixSize;

   /*
    * The maximum offset is used to determine the maximum offset from the
    * guestPtr base address that will be accessed or written to during this
    * surfaceDMA. If the suffix is supported, the host will respect this
    * boundary while performing surface DMAs.
    *
    * Defaults to MAX_UINT32
    */
   uint32 maximumOffset;

   /*
    * A set of flags that describes optimizations that the host may perform
    * while performing this surface DMA operation. The guest should never rely
    * on behaviour that is different when these flags are set for correctness.
    *
    * Defaults to 0
    */
   SVGA3dSurfaceDMAFlags flags;
} SVGA3dCmdSurfaceDMASuffix;
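/*
 * Editor's illustration (not part of this commit): a conservative guest
 * fills the suffix with the defaults documented above.  The helper name
 * is hypothetical.
 */
static inline void svga3d_init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *suffix)
{
   suffix->suffixSize = sizeof(*suffix);
   suffix->maximumOffset = (uint32)-1; /* MAX_UINT32: no tighter bound */
   suffix->flags.discard = 0;
   suffix->flags.unsynchronized = 0;
   suffix->flags.reserved = 0;         /* reserved bits *MUST* be zero */
}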
/*
* SVGA_3D_CMD_DRAW_PRIMITIVES --
*
* This command is the SVGA3D device's generic drawing entry point.
* It can draw multiple ranges of primitives, optionally using an
* index buffer, using an arbitrary collection of vertex buffers.
*
* Each SVGA3dVertexDecl defines a distinct vertex array to bind
* during this draw call. The declarations specify which surface
* the vertex data lives in, what that vertex data is used for,
* and how to interpret it.
*
* Each SVGA3dPrimitiveRange defines a collection of primitives
* to render using the same vertex arrays. An index buffer is
* optional.
*/
typedef struct {
   /*
    * A range hint is an optional specification for the range of indices
    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
    * that the entire array will be used.
    *
    * These are only hints. The SVGA3D device may use them for
    * performance optimization if possible, but it's also allowed to
    * ignore these values.
    */
   uint32 first;
   uint32 last;
} SVGA3dArrayRangeHint;

typedef struct {
   /*
    * Define the origin and shape of a vertex or index array. Both
    * 'offset' and 'stride' are in bytes. The provided surface will be
    * reinterpreted as a flat array of bytes in the same format used
    * by surface DMA operations. To avoid unnecessary conversions, the
    * surface should be created with the SVGA3D_BUFFER format.
    *
    * Index 0 in the array starts 'offset' bytes into the surface.
    * Index 1 begins at byte 'offset + stride', etc. Array indices may
    * not be negative.
    */
   uint32 surfaceId;
   uint32 offset;
   uint32 stride;
} SVGA3dArray;

typedef struct {
   /*
    * Describe a vertex array's data type, and define how it is to be
    * used by the fixed function pipeline or the vertex shader. It
    * isn't useful to have two VertexDecls with the same
    * VertexArrayIdentity in one draw call.
    */
   SVGA3dDeclType type;
   SVGA3dDeclMethod method;
   SVGA3dDeclUsage usage;
   uint32 usageIndex;
} SVGA3dVertexArrayIdentity;

typedef struct {
   SVGA3dVertexArrayIdentity identity;
   SVGA3dArray array;
   SVGA3dArrayRangeHint rangeHint;
} SVGA3dVertexDecl;
typedef struct {
   /*
    * Define a group of primitives to render, from sequential indices.
    *
    * The value of 'primType' and 'primitiveCount' imply the
    * total number of vertices that will be rendered.
    */
   SVGA3dPrimitiveType primType;
   uint32 primitiveCount;

   /*
    * Optional index buffer. If indexArray.surfaceId is
    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
    * without an index buffer is identical to rendering with an index
    * buffer containing the sequence [0, 1, 2, 3, ...].
    *
    * If an index buffer is in use, indexWidth specifies the width in
    * bytes of each index value. It must be less than or equal to
    * indexArray.stride.
    *
    * (Currently, the SVGA3D device requires index buffers to be tightly
    * packed. In other words, indexWidth == indexArray.stride)
    */
   SVGA3dArray indexArray;
   uint32 indexWidth;

   /*
    * Optional index bias. This number is added to all indices from
    * indexArray before they are used as vertex array indices. This
    * can be used in multiple ways:
    *
    *  - When not using an indexArray, this bias can be used to
    *    specify where in the vertex arrays to begin rendering.
    *
    *  - A positive number here is equivalent to increasing the
    *    offset in each vertex array.
    *
    *  - A negative number can be used to render using a small
    *    vertex array and an index buffer that contains large
    *    values. This may be used by some applications that
    *    crop a vertex buffer without modifying their index
    *    buffer.
    *
    * Note that rendering with a negative bias value may be slower and
    * use more memory than rendering with a positive or zero bias.
    */
   int32 indexBias;
} SVGA3dPrimitiveRange;
typedef struct {
   uint32 cid;
   uint32 numVertexDecls;
   uint32 numRanges;

   /*
    * There are two variable size arrays after the
    * SVGA3dCmdDrawPrimitives structure. In order,
    * they are:
    *
    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
    * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
    *    the frequency divisor for the corresponding vertex decl)
    */
} SVGA3dCmdDrawPrimitives;            /* SVGA_3D_CMD_DRAWPRIMITIVES */
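/*
 * Editor's illustration (not part of this commit): filling the two
 * variable-length arrays for the simplest case, a non-indexed triangle
 * list fed by one position-only vertex array.  The helper name and the
 * 'vertexSid'/'numTriangles' inputs are hypothetical.
 */
static inline void
hypothetical_fill_draw_arrays(SVGA3dVertexDecl *decl,
                              SVGA3dPrimitiveRange *range,
                              uint32 vertexSid, uint32 numTriangles)
{
   decl->identity.type = SVGA3D_DECLTYPE_FLOAT3;
   decl->identity.method = SVGA3D_DECLMETHOD_DEFAULT;
   decl->identity.usage = SVGA3D_DECLUSAGE_POSITION;
   decl->identity.usageIndex = 0;
   decl->array.surfaceId = vertexSid;
   decl->array.offset = 0;
   decl->array.stride = 3 * sizeof(float);
   decl->rangeHint.first = 0;
   decl->rangeHint.last = 0;           /* zero: whole array (a hint) */

   range->primType = SVGA3D_PRIMITIVE_TRIANGLELIST;
   range->primitiveCount = numTriangles;
   range->indexArray.surfaceId = SVGA3D_INVALID_ID; /* non-indexed */
   range->indexArray.offset = 0;
   range->indexArray.stride = 0;
   range->indexWidth = 0;
   range->indexBias = 0;
}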
typedef struct {
   uint32 stage;
   SVGA3dTextureStateName name;
   union {
      uint32 value;
      float floatValue;
   };
} SVGA3dTextureState;

typedef struct {
   uint32 cid;
   /* Followed by variable number of SVGA3dTextureState structures */
} SVGA3dCmdSetTextureState;           /* SVGA_3D_CMD_SETTEXTURESTATE */

typedef struct {
   uint32 cid;
   SVGA3dTransformType type;
   float matrix[16];
} SVGA3dCmdSetTransform;              /* SVGA_3D_CMD_SETTRANSFORM */

typedef struct {
   float min;
   float max;
} SVGA3dZRange;

typedef struct {
   uint32 cid;
   SVGA3dZRange zRange;
} SVGA3dCmdSetZRange;                 /* SVGA_3D_CMD_SETZRANGE */

typedef struct {
   float diffuse[4];
   float ambient[4];
   float specular[4];
   float emissive[4];
   float shininess;
} SVGA3dMaterial;

typedef struct {
   uint32 cid;
   SVGA3dFace face;
   SVGA3dMaterial material;
} SVGA3dCmdSetMaterial;               /* SVGA_3D_CMD_SETMATERIAL */

typedef struct {
   uint32 cid;
   uint32 index;
   SVGA3dLightData data;
} SVGA3dCmdSetLightData;              /* SVGA_3D_CMD_SETLIGHTDATA */

typedef struct {
   uint32 cid;
   uint32 index;
   uint32 enabled;
} SVGA3dCmdSetLightEnabled;           /* SVGA_3D_CMD_SETLIGHTENABLED */

typedef struct {
   uint32 cid;
   SVGA3dRect rect;
} SVGA3dCmdSetViewport;               /* SVGA_3D_CMD_SETVIEWPORT */

typedef struct {
   uint32 cid;
   SVGA3dRect rect;
} SVGA3dCmdSetScissorRect;            /* SVGA_3D_CMD_SETSCISSORRECT */

typedef struct {
   uint32 cid;
   uint32 index;
   float plane[4];
} SVGA3dCmdSetClipPlane;              /* SVGA_3D_CMD_SETCLIPPLANE */
typedef struct {
   uint32 cid;
   uint32 shid;
   SVGA3dShaderType type;
   /* Followed by variable number of DWORDs for shader bytecode */
} SVGA3dCmdDefineShader;              /* SVGA_3D_CMD_SHADER_DEFINE */
typedef struct {
   uint32 cid;
   uint32 shid;
   SVGA3dShaderType type;
} SVGA3dCmdDestroyShader;             /* SVGA_3D_CMD_SHADER_DESTROY */

typedef struct {
   uint32 cid;
   uint32 reg;                        /* register number */
   SVGA3dShaderType type;
   SVGA3dShaderConstType ctype;
   uint32 values[4];
} SVGA3dCmdSetShaderConst;            /* SVGA_3D_CMD_SET_SHADER_CONST */

typedef struct {
   uint32 cid;
   SVGA3dShaderType type;
   uint32 shid;
} SVGA3dCmdSetShader;                 /* SVGA_3D_CMD_SET_SHADER */

typedef struct {
   uint32 cid;
   SVGA3dQueryType type;
} SVGA3dCmdBeginQuery;                /* SVGA_3D_CMD_BEGIN_QUERY */

typedef struct {
   uint32 cid;
   SVGA3dQueryType type;
   SVGAGuestPtr guestResult;          /* Points to an SVGA3dQueryResult structure */
} SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */

typedef struct {
   uint32 cid;                        /* Same parameters passed to END_QUERY */
   SVGA3dQueryType type;
   SVGAGuestPtr guestResult;
} SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */

typedef struct {
   uint32 totalSize;                  /* Set by guest before query is ended. */
   SVGA3dQueryState state;            /* Set by host or guest. See SVGA3dQueryState. */
   union {                            /* Set by host on exit from PENDING state */
      uint32 result32;
   };
} SVGA3dQueryResult;
/*
* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
*
* This is a blit from an SVGA3D surface to a Screen Object. Just
* like GMR-to-screen blits, this blit may be directed at a
* specific screen or to the virtual coordinate space.
*
* The blit copies from a rectangular region of an SVGA3D surface
* image to a rectangular region of a screen or screens.
*
* This command takes an optional variable-length list of clipping
* rectangles after the body of the command. If no rectangles are
* specified, there is no clipping region. The entire destRect is
* drawn to. If one or more rectangles are included, they describe
* a clipping region. The clip rectangle coordinates are measured
* relative to the top-left corner of destRect.
*
* This clipping region serves multiple purposes:
*
* - It can be used to perform an irregularly shaped blit more
* efficiently than by issuing many separate blit commands.
*
* - It is equivalent to allowing blits with non-integer
* source coordinates. You could blit just one half-pixel
* of a source, for example, by specifying a larger
* destination rectangle than you need, then removing
* part of it using a clip rectangle.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*
* Limitations:
*
* - Currently, no backend supports blits from a mipmap or face
* other than the first one.
*/
typedef struct {
   SVGA3dSurfaceImageId srcImage;
   SVGASignedRect srcRect;
   uint32 destScreenId;               /* Screen ID or SVGA_ID_INVALID for virt. coords */
   SVGASignedRect destRect;           /* Supports scaling if src/dest different size */
   /* Clipping: zero or more SVGASignedRects follow */
} SVGA3dCmdBlitSurfaceToScreen;       /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
/*
* Capability query index.
*
* Notes:
*
* 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
* fixed-function texture units available. These units work in
* both FFP and Shader modes, and they support texture
* transforms and texture coordinates. The host may have additional
* texture image units that are only usable with shaders.
*
* 2. The BUFFER_FORMAT capabilities are deprecated, and they always
* return TRUE. Even on physical hardware that does not support
* these formats natively, the SVGA3D device will provide an emulation
* which should be invisible to the guest OS.
*
* In general, the SVGA3D device should support any operation on
* any surface format; it just may perform some of these
* operations in software depending on the capabilities of the
* available physical hardware.
*
* XXX: In the future, we will add capabilities that describe in
* detail what formats are supported in hardware for what kinds
* of operations.
*/
typedef enum {
   SVGA3D_DEVCAP_3D                               = 0,
   SVGA3D_DEVCAP_MAX_LIGHTS                       = 1,
   SVGA3D_DEVCAP_MAX_TEXTURES                     = 2,   /* See note (1) */
   SVGA3D_DEVCAP_MAX_CLIP_PLANES                  = 3,
   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION            = 4,
   SVGA3D_DEVCAP_VERTEX_SHADER                    = 5,
   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION          = 6,
   SVGA3D_DEVCAP_FRAGMENT_SHADER                  = 7,
   SVGA3D_DEVCAP_MAX_RENDER_TARGETS               = 8,
   SVGA3D_DEVCAP_S23E8_TEXTURES                   = 9,
   SVGA3D_DEVCAP_S10E5_TEXTURES                   = 10,
   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND            = 11,
   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                = 12,  /* See note (2) */
   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT              = 13,  /* See note (2) */
   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT              = 14,  /* See note (2) */
   SVGA3D_DEVCAP_QUERY_TYPES                      = 15,
   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING        = 16,
   SVGA3D_DEVCAP_MAX_POINT_SIZE                   = 17,
   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES              = 18,
   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                = 19,
   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT               = 20,
   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                = 21,
   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT               = 22,
   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO         = 23,
   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY           = 24,
   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT              = 25,
   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                 = 26,
   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS   = 27,
   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS          = 29,
   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS        = 30,
   SVGA3D_DEVCAP_TEXTURE_OPS                      = 31,
   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8              = 32,
   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8              = 33,
   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10           = 34,
   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5              = 35,
   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5              = 36,
   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4              = 37,
   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                = 38,
   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16           = 39,
   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8     = 40,
   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                = 41,
   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8            = 42,
   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                 = 43,
   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8               = 44,
   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8               = 45,
   SVGA3D_DEVCAP_SURFACEFMT_DXT1                  = 46,
   SVGA3D_DEVCAP_SURFACEFMT_DXT2                  = 47,
   SVGA3D_DEVCAP_SURFACEFMT_DXT3                  = 48,
   SVGA3D_DEVCAP_SURFACEFMT_DXT4                  = 49,
   SVGA3D_DEVCAP_SURFACEFMT_DXT5                  = 50,
   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8          = 51,
   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10           = 52,
   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8              = 53,
   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8              = 54,
   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                = 55,
   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5               = 56,
   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8               = 57,
   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5              = 58,
   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8              = 59,
   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5            = 60,
   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8            = 61,
   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES       = 63,

   /*
    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
    * render targets. This does not include the depth or stencil targets.
    */
   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS  = 64,

   SVGA3D_DEVCAP_SURFACEFMT_V16U16                = 65,
   SVGA3D_DEVCAP_SURFACEFMT_G16R16                = 66,
   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16          = 67,
   SVGA3D_DEVCAP_SURFACEFMT_UYVY                  = 68,
   SVGA3D_DEVCAP_SURFACEFMT_YUY2                  = 69,

   /*
    * Don't add new caps into the previous section; the values in this
    * enumeration must not change. You can put new values right before
    * SVGA3D_DEVCAP_MAX.
    */
   SVGA3D_DEVCAP_MAX                                     /* This must be the last index. */
} SVGA3dDevCapIndex;
typedef union {
   Bool b;
   uint32 u;
   int32 i;
   float f;
} SVGA3dDevCapResult;
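/*
 * Editor's illustration (not part of this commit): the capability index
 * determines which union member is meaningful, e.g. a Bool for
 * SVGA3D_DEVCAP_3D and a float for SVGA3D_DEVCAP_MAX_POINT_SIZE.  How
 * 'result' is fetched from the host is driver-specific and not shown;
 * the helper names are hypothetical.
 */
static inline int svga3d_cap_to_bool(SVGA3dDevCapResult result)
{
   return result.b != 0;               /* e.g. SVGA3D_DEVCAP_3D */
}

static inline float svga3d_cap_to_float(SVGA3dDevCapResult result)
{
   return result.f;                    /* e.g. SVGA3D_DEVCAP_MAX_POINT_SIZE */
}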
#endif /* _SVGA3D_REG_H_ */
drivers/gpu/drm/vmwgfx/svga_escape.h (new file, mode 100644)
/**********************************************************
* Copyright 2007-2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
/*
* svga_escape.h --
*
* Definitions for our own (vendor-specific) SVGA Escape commands.
*/
#ifndef _SVGA_ESCAPE_H_
#define _SVGA_ESCAPE_H_
/*
* Namespace IDs for the escape command
*/
#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
/*
* Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
* the first DWORD of escape data (after the nsID and size). As a
* guideline we're using the high word and low word as a major and
* minor command number, respectively.
*
* Major command number allocation:
*
* 0000: Reserved
* 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
* 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
* 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
*/
#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
/*
* SVGA Hint commands.
*
* These escapes let the SVGA driver provide optional information to
* the host about the state of the guest or guest applications. The
* host can use these hints to make user interface or performance
* decisions.
*
* Notes:
*
* - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
* that use the SVGA Screen Object extension. Instead of sending
* this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
* Screen Object.
*/
#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
typedef
struct {
   uint32 command;
   uint32 fullscreen;
   struct {
      int32 x, y;
   } monitorPosition;
} SVGAEscapeHintFullscreen;

#endif /* _SVGA_ESCAPE_H_ */
drivers/gpu/drm/vmwgfx/svga_overlay.h
0 → 100644
/**********************************************************
* Copyright 2007-2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
/*
* svga_overlay.h --
*
* Definitions for video-overlay support.
*/
#ifndef _SVGA_OVERLAY_H_
#define _SVGA_OVERLAY_H_
#include "svga_reg.h"
/*
* Video formats we support
*/
#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
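/*
 * Illustrative sketch, not part of the original header: the FOURCC
 * values above are just the four characters packed little-endian,
 * least-significant byte first. SVGA_MAKE_FOURCC is a hypothetical
 * macro included only to show how such constants are derived.
 */
#define SVGA_MAKE_FOURCC(a, b, c, d) \
   ((uint32)(a) | ((uint32)(b) << 8) | ((uint32)(c) << 16) | ((uint32)(d) << 24))
/* SVGA_MAKE_FOURCC('Y', 'V', '1', '2') == 0x32315659 == VMWARE_FOURCC_YV12 */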
typedef enum {
   SVGA_OVERLAY_FORMAT_INVALID = 0,
   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
} SVGAOverlayFormat;
#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
/* FIFO escape layout:
* Type, Stream Id, (Register Id, Value) pairs */
#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
/* FIFO escape layout:
* Type, Stream Id */
typedef
struct SVGAEscapeVideoSetRegs {
   struct {
      uint32 cmdType;
      uint32 streamId;
   } header;

   // May include zero or more items.
   struct {
      uint32 registerId;
      uint32 value;
   } items[1];
} SVGAEscapeVideoSetRegs;

typedef
struct SVGAEscapeVideoFlush {
   uint32 cmdType;
   uint32 streamId;
} SVGAEscapeVideoFlush;

/*
 * Struct definitions for the video overlay commands built on
 * SVGAFifoCmdEscape.
 */
typedef
struct {
   uint32 command;
   uint32 overlay;
} SVGAFifoEscapeCmdVideoBase;

typedef
struct {
   SVGAFifoEscapeCmdVideoBase videoCmd;
} SVGAFifoEscapeCmdVideoFlush;

typedef
struct {
   SVGAFifoEscapeCmdVideoBase videoCmd;
   struct {
      uint32 regId;
      uint32 value;
   } items[1];
} SVGAFifoEscapeCmdVideoSetRegs;

typedef
struct {
   SVGAFifoEscapeCmdVideoBase videoCmd;
   struct {
      uint32 regId;
      uint32 value;
   } items[SVGA_VIDEO_NUM_REGS];
} SVGAFifoEscapeCmdVideoSetAllRegs;
/*
*----------------------------------------------------------------------
*
* VMwareVideoGetAttributes --
*
* Computes the size, pitches and offsets for YUV frames.
*
* Results:
* TRUE on success; otherwise FALSE on failure.
*
* Side effects:
* Pitches and offsets for the given YUV frame are put in 'pitches'
* and 'offsets' respectively. They are both optional though.
*
*----------------------------------------------------------------------
*/
static inline bool
VMwareVideoGetAttributes(const SVGAOverlayFormat format,    // IN
                         uint32 *width,                     // IN / OUT
                         uint32 *height,                    // IN / OUT
                         uint32 *size,                      // OUT
                         uint32 *pitches,                   // OUT (optional)
                         uint32 *offsets)                   // OUT (optional)
{
   int tmp;

   *width = (*width + 1) & ~1;

   if (offsets) {
      offsets[0] = 0;
   }

   switch (format) {
   case VMWARE_FOURCC_YV12:
      *height = (*height + 1) & ~1;
      *size = (*width + 3) & ~3;

      if (pitches) {
         pitches[0] = *size;
      }

      *size *= *height;

      if (offsets) {
         offsets[1] = *size;
      }

      tmp = ((*width >> 1) + 3) & ~3;

      if (pitches) {
         pitches[1] = pitches[2] = tmp;
      }

      tmp *= (*height >> 1);
      *size += tmp;

      if (offsets) {
         offsets[2] = *size;
      }

      *size += tmp;
      break;

   case VMWARE_FOURCC_YUY2:
   case VMWARE_FOURCC_UYVY:
      *size = *width * 2;

      if (pitches) {
         pitches[0] = *size;
      }

      *size *= *height;
      break;

   default:
      return false;
   }

   return true;
}

#endif // _SVGA_OVERLAY_H_
drivers/gpu/drm/vmwgfx/svga_reg.h
0 → 100644
/**********************************************************
* Copyright 1998-2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
/*
* svga_reg.h --
*
* Virtual hardware definitions for the VMware SVGA II device.
*/
#ifndef _SVGA_REG_H_
#define _SVGA_REG_H_
/*
* PCI device IDs.
*/
#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
*/
#define SVGA_CURSOR_ON_HIDE 0x0
/* Must be 0 to maintain backward compatibility */
#define SVGA_CURSOR_ON_SHOW 0x1
/* Must be 1 to maintain backward compatibility */
#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2
/* Remove the cursor from the framebuffer because we need to see what's under it */
#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3
/* Put the cursor back in the framebuffer so the user can see it */
/*
 * The maximum framebuffer size that can be traced for e.g. guests in VESA mode.
* The changeMap in the monitor is proportional to this number. Therefore, we'd
* like to keep it as small as possible to reduce monitor overhead (using
* SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
* 4k!).
*
* NB: For compatibility reasons, this value must be greater than 0xff0000.
* See bug 335072.
*/
#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
#define SVGA_MAGIC 0x900000UL
#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
/* Version 2 let the address of the frame buffer be unsigned on Win32 */
#define SVGA_VERSION_2 2
#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
PALETTE_BASE has moved */
#define SVGA_VERSION_1 1
#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
/* Version 0 is the initial version */
#define SVGA_VERSION_0 0
#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
#define SVGA_ID_INVALID 0xFFFFFFFF
/* Port offsets, relative to BAR0 */
#define SVGA_INDEX_PORT 0x0
#define SVGA_VALUE_PORT 0x1
#define SVGA_BIOS_PORT 0x2
#define SVGA_IRQSTATUS_PORT 0x8
/*
* Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
*
* Interrupts are only supported when the
* SVGA_CAP_IRQMASK capability is present.
*/
#define SVGA_IRQFLAG_ANY_FENCE 0x1
/* Any fence was passed */
#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2
/* Made forward progress in the FIFO */
#define SVGA_IRQFLAG_FENCE_GOAL 0x4
/* SVGA_FIFO_FENCE_GOAL reached */
/*
* Registers
*/
enum {
   SVGA_REG_ID = 0,
   SVGA_REG_ENABLE = 1,
   SVGA_REG_WIDTH = 2,
   SVGA_REG_HEIGHT = 3,
   SVGA_REG_MAX_WIDTH = 4,
   SVGA_REG_MAX_HEIGHT = 5,
   SVGA_REG_DEPTH = 6,
   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
   SVGA_REG_PSEUDOCOLOR = 8,
   SVGA_REG_RED_MASK = 9,
   SVGA_REG_GREEN_MASK = 10,
   SVGA_REG_BLUE_MASK = 11,
   SVGA_REG_BYTES_PER_LINE = 12,
   SVGA_REG_FB_START = 13,            /* (Deprecated) */
   SVGA_REG_FB_OFFSET = 14,
   SVGA_REG_VRAM_SIZE = 15,
   SVGA_REG_FB_SIZE = 16,

   /* ID 0 implementation only had the above registers, then the palette */

   SVGA_REG_CAPABILITIES = 17,
   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
   SVGA_REG_MEM_SIZE = 19,
   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */

   /* Legacy multi-monitor support */
   SVGA_REG_NUM_GUEST_DISPLAYS = 34,  /* Number of guest displays in X/Y direction */
   SVGA_REG_DISPLAY_ID = 35,          /* Display ID for the following display attributes */
   SVGA_REG_DISPLAY_IS_PRIMARY = 36,  /* Whether this is a primary display */
   SVGA_REG_DISPLAY_POSITION_X = 37,  /* The display position x */
   SVGA_REG_DISPLAY_POSITION_Y = 38,  /* The display position y */
   SVGA_REG_DISPLAY_WIDTH = 39,       /* The display's width */
   SVGA_REG_DISPLAY_HEIGHT = 40,      /* The display's height */

   /* See "Guest memory regions" below. */
   SVGA_REG_GMR_ID = 41,
   SVGA_REG_GMR_DESCRIPTOR = 42,
   SVGA_REG_GMR_MAX_IDS = 43,
   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,

   SVGA_REG_TRACES = 45,              /* Enable trace-based updates even when FIFO is on */
   SVGA_REG_TOP = 46,                 /* Must be 1 more than the last register */

   SVGA_PALETTE_BASE = 1024,          /* Base of SVGA color map */
   /* Next 768 (== 256*3) registers exist for colormap */

   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
                                      /* Base of scratch registers */
   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
      First 4 are reserved for VESA BIOS Extension; any remaining are for
      the use of the current SVGA driver. */
};
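/*
 * Illustrative sketch, not part of the original header: a driver
 * negotiates the protocol version by writing the highest ID it knows
 * to SVGA_REG_ID and reading it back until the device accepts one.
 * svga_write_reg()/svga_read_reg() are hypothetical register accessors.
 */
extern void svga_write_reg(uint32 index, uint32 value);   /* hypothetical */
extern uint32 svga_read_reg(uint32 index);                /* hypothetical */

static inline uint32 svga_negotiate_version(void)
{
   uint32 id;

   for (id = SVGA_ID_2; id >= SVGA_ID_0; id--) {
      svga_write_reg(SVGA_REG_ID, id);
      if (svga_read_reg(SVGA_REG_ID) == id)
         return id;   /* Device accepted this protocol version. */
   }
   return SVGA_ID_INVALID;
}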
/*
* Guest memory regions (GMRs):
*
* This is a new memory mapping feature available in SVGA devices
* which have the SVGA_CAP_GMR bit set. Previously, there were two
* fixed memory regions available with which to share data between the
* device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
* are our name for an extensible way of providing arbitrary DMA
* buffers for use between the driver and the SVGA device. They are a
* new alternative to framebuffer memory, usable for both 2D and 3D
* graphics operations.
*
* Since GMR mapping must be done synchronously with guest CPU
* execution, we use a new pair of SVGA registers:
*
* SVGA_REG_GMR_ID --
*
* Read/write.
* This register holds the 32-bit ID (a small positive integer)
* of a GMR to create, delete, or redefine. Writing this register
* has no side-effects.
*
* SVGA_REG_GMR_DESCRIPTOR --
*
* Write-only.
* Writing this register will create, delete, or redefine the GMR
* specified by the above ID register. If this register is zero,
* the GMR is deleted. Any pointers into this GMR (including those
* currently being processed by FIFO commands) will be
* synchronously invalidated.
*
* If this register is nonzero, it must be the physical page
* number (PPN) of a data structure which describes the physical
* layout of the memory region this GMR should describe. The
* descriptor structure will be read synchronously by the SVGA
* device when this register is written. The descriptor need not
* remain allocated for the lifetime of the GMR.
*
* The guest driver should write SVGA_REG_GMR_ID first, then
* SVGA_REG_GMR_DESCRIPTOR.
*
* SVGA_REG_GMR_MAX_IDS --
*
* Read-only.
* The SVGA device may choose to support a maximum number of
* user-defined GMR IDs. This register holds the number of supported
* IDs. (The maximum supported ID plus 1)
*
* SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
*
* Read-only.
* The SVGA device may choose to put a limit on the total number
* of SVGAGuestMemDescriptor structures it will read when defining
* a single GMR.
*
* The descriptor structure is an array of SVGAGuestMemDescriptor
* structures. Each structure may do one of three things:
*
* - Terminate the GMR descriptor list.
* (ppn==0, numPages==0)
*
* - Add a PPN or range of PPNs to the GMR's virtual address space.
* (ppn != 0, numPages != 0)
*
* - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
* support multi-page GMR descriptor tables without forcing the
* driver to allocate physically contiguous memory.
* (ppn != 0, numPages == 0)
*
* Note that each physical page of SVGAGuestMemDescriptor structures
* can describe at least 2MB of guest memory. If the driver needs to
* use more than one page of descriptor structures, it must use one of
* its SVGAGuestMemDescriptors to point to an additional page. The
* device will never automatically cross a page boundary.
*
* Once the driver has described a GMR, it is immediately available
* for use via any FIFO command that uses an SVGAGuestPtr structure.
* These pointers include a GMR identifier plus an offset into that
* GMR.
*
* The driver must check the SVGA_CAP_GMR bit before using the GMR
* registers.
*/
/*
* Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
* memory as well. In the future, these IDs could even be used to
* allow legacy memory regions to be redefined by the guest as GMRs.
*
* Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
* is being phased out. Please try to use user-defined GMRs whenever
* possible.
*/
#define SVGA_GMR_NULL ((uint32) -1)
#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
typedef
struct SVGAGuestMemDescriptor {
   uint32 ppn;
   uint32 numPages;
} SVGAGuestMemDescriptor;

typedef
struct SVGAGuestPtr {
   uint32 gmrId;
   uint32 offset;
} SVGAGuestPtr;
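/*
 * Illustrative sketch, not part of the original header: defining a
 * one-page GMR using the two-register handshake described in the
 * "Guest memory regions" comment above. svga_write_reg() is the same
 * hypothetical accessor as earlier; 'desc_ppn' must be the physical
 * page number of the page holding the descriptor array.
 */
static inline void svga_define_simple_gmr(uint32 gmr_id, uint32 desc_ppn,
                                          SVGAGuestMemDescriptor *desc,
                                          uint32 data_ppn)
{
   desc[0].ppn = data_ppn;      /* One page of guest memory... */
   desc[0].numPages = 1;
   desc[1].ppn = 0;             /* ...then the list terminator. */
   desc[1].numPages = 0;

   svga_write_reg(SVGA_REG_GMR_ID, gmr_id);            /* ID first, */
   svga_write_reg(SVGA_REG_GMR_DESCRIPTOR, desc_ppn);  /* then descriptor. */
}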
/*
* SVGAGMRImageFormat --
*
* This is a packed representation of the source 2D image format
* for a GMR-to-screen blit. Currently it is defined as an encoding
* of the screen's color depth and bits-per-pixel, however, 16 bits
* are reserved for future use to identify other encodings (such as
* RGBA or higher-precision images).
*
* Currently supported formats:
*
* bpp depth Format Name
* --- ----- -----------
* 32 24 32-bit BGRX
* 24 24 24-bit BGR
* 16 16 RGB 5-6-5
* 16 15 RGB 5-5-5
*
*/
typedef
struct SVGAGMRImageFormat {
   union {
      struct {
         uint32 bitsPerPixel : 8;
         uint32 colorDepth   : 8;
         uint32 reserved     : 16;  // Must be zero
      };

      uint32 value;
   };
} SVGAGMRImageFormat;
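/*
 * Illustrative sketch, not part of the original header: packing one of
 * the supported formats from the table above (e.g. 32 bpp at 24-bit
 * depth) into the bitfield. svga_pack_gmr_format() is a hypothetical
 * helper shown only to make the encoding concrete.
 */
static inline uint32 svga_pack_gmr_format(uint32 bpp, uint32 depth)
{
   SVGAGMRImageFormat fmt;

   fmt.value = 0;            /* Clears the reserved bits, which must be zero. */
   fmt.bitsPerPixel = bpp;   /* e.g. 32 */
   fmt.colorDepth = depth;   /* e.g. 24 */
   return fmt.value;
}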
/*
* SVGAColorBGRX --
*
* A 24-bit color format (BGRX), which does not depend on the
* format of the legacy guest framebuffer (GFB) or the current
* GMRFB state.
*/
typedef
struct SVGAColorBGRX {
   union {
      struct {
         uint32 b : 8;
         uint32 g : 8;
         uint32 r : 8;
         uint32 x : 8;  // Unused
      };

      uint32 value;
   };
} SVGAColorBGRX;
/*
* SVGASignedRect --
* SVGASignedPoint --
*
* Signed rectangle and point primitives. These are used by the new
* 2D primitives for drawing to Screen Objects, which can occupy a
* signed virtual coordinate space.
*
* SVGASignedRect specifies a half-open interval: the (left, top)
* pixel is part of the rectangle, but the (right, bottom) pixel is
* not.
*/
typedef
struct SVGASignedRect {
   int32 left;
   int32 top;
   int32 right;
   int32 bottom;
} SVGASignedRect;

typedef
struct SVGASignedPoint {
   int32 x;
   int32 y;
} SVGASignedPoint;
/*
* Capabilities
*
* Note the holes in the bitfield. Missing bits have been deprecated,
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
*/
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
/*
* FIFO register indices.
*
* The FIFO is a chunk of device memory mapped into guest physmem. It
* is always treated as 32-bit words.
*
* The guest driver gets to decide how to partition it between
* - FIFO registers (there are always at least 4, specifying where the
* following data area is and how much data it contains; there may be
* more registers following these, depending on the FIFO protocol
* version in use)
* - FIFO data, written by the guest and slurped out by the VMX.
* These indices are 32-bit word offsets into the FIFO.
*/
enum {
   /*
    * Block 1 (basic registers): The originally defined FIFO registers.
    * These exist and are valid for all versions of the FIFO protocol.
    */

   SVGA_FIFO_MIN = 0,
   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
   SVGA_FIFO_NEXT_CMD,
   SVGA_FIFO_STOP,

   /*
    * Block 2 (extended registers): Mandatory registers for the extended
    * FIFO. These exist if the SVGA caps register includes
    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
    * associated capability bit is enabled.
    *
    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
    * This means that the guest has to test individually (in most cases
    * using FIFO caps) for the presence of registers after this; the VMX
    * can define "extended FIFO" to mean whatever it wants, and currently
    * won't enable it unless there's room for that set and much more.
    */

   SVGA_FIFO_CAPABILITIES = 4,
   SVGA_FIFO_FLAGS,
   // Valid with SVGA_FIFO_CAP_FENCE:
   SVGA_FIFO_FENCE,

   /*
    * Block 3a (optional extended registers): Additional registers for the
    * extended FIFO, whose presence isn't actually implied by
    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
    * leave room for them.
    *
    * The registers in block 3a are currently considered mandatory by the
    * VMX for the extended FIFO.
    */

   // Valid if exists (i.e. if extended FIFO enabled):
   SVGA_FIFO_3D_HWVERSION,        /* See SVGA3dHardwareVersion in svga3d_reg.h */
   // Valid with SVGA_FIFO_CAP_PITCHLOCK:
   SVGA_FIFO_PITCHLOCK,

   // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
   SVGA_FIFO_CURSOR_ON,           /* Cursor bypass 3 show/hide register */
   SVGA_FIFO_CURSOR_X,            /* Cursor bypass 3 x register */
   SVGA_FIFO_CURSOR_Y,            /* Cursor bypass 3 y register */
   SVGA_FIFO_CURSOR_COUNT,        /* Incremented when any of the other 3 change */
   SVGA_FIFO_CURSOR_LAST_UPDATED, /* Last time the host updated the cursor */

   // Valid with SVGA_FIFO_CAP_RESERVE:
   SVGA_FIFO_RESERVED,            /* Bytes past NEXT_CMD with real contents */

   /*
    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
    *
    * By default this is SVGA_ID_INVALID, to indicate that the cursor
    * coordinates are specified relative to the virtual root. If this
    * is set to a specific screen ID, cursor position is reinterpreted
    * as a signed offset relative to that screen's origin. This is the
    * only way to place the cursor on a non-rooted screen.
    */
   SVGA_FIFO_CURSOR_SCREEN_ID,

   /*
    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
    * registers, but this must be done carefully and with judicious use of
    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
    * enough to tell you whether the register exists: we've shipped drivers
    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
    * the earlier ones. The actual order of introduction was:
    * - PITCHLOCK
    * - 3D_CAPS
    * - CURSOR_* (cursor bypass 3)
    * - RESERVED
    * So, code that wants to know whether it can use any of the
    * aforementioned registers, or anything else added after PITCHLOCK and
    * before 3D_CAPS, needs to reason about something other than
    * SVGA_FIFO_MIN.
    */

   /*
    * 3D caps block space; valid with 3D hardware version >=
    * SVGA3D_HWVERSION_WS6_B1.
    */
   SVGA_FIFO_3D_CAPS      = 32,
   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,

   /*
    * End of VMX's current definition of "extended-FIFO registers".
    * Registers before here are always enabled/disabled as a block; either
    * the extended FIFO is enabled and includes all preceding registers, or
    * it's disabled entirely.
    *
    * Block 3b (truly optional extended registers): Additional registers for
    * the extended FIFO, which the VMX already knows how to enable and
    * disable with correct granularity.
    *
    * Registers after here exist if and only if the guest SVGA driver
    * sets SVGA_FIFO_MIN high enough to leave room for them.
    */

   // Valid if register exists:
   SVGA_FIFO_GUEST_3D_HWVERSION,  /* Guest driver's 3D version */
   SVGA_FIFO_FENCE_GOAL,          /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
   SVGA_FIFO_BUSY,                /* See "FIFO Synchronization Registers" */

   /*
    * Always keep this last. This defines the maximum number of
    * registers we know about. At power-on, this value is placed in
    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
    * to allocate this much space in FIFO memory for registers.
    */
   SVGA_FIFO_NUM_REGS
};
/*
* Definition of registers included in extended FIFO support.
*
* The guest SVGA driver gets to allocate the FIFO between registers
* and data. It must always allocate at least 4 registers, but old
* drivers stopped there.
*
* The VMX will enable extended FIFO support if and only if the guest
* left enough room for all registers defined as part of the mandatory
* set for the extended FIFO.
*
* Note that the guest drivers typically allocate the FIFO only at
* initialization time, not at mode switches, so it's likely that the
* number of FIFO registers won't change without a reboot.
*
* All registers less than this value are guaranteed to be present if
* svgaUser->fifo.extended is set. Any later registers must be tested
* individually for compatibility at each use (in the VMX).
*
* This value is used only by the VMX, so it can change without
* affecting driver compatibility; keep it that way?
*/
#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
/*
* FIFO Synchronization Registers
*
* This explains the relationship between the various FIFO
* sync-related registers in IOSpace and in FIFO space.
*
* SVGA_REG_SYNC --
*
* The SYNC register can be used in two different ways by the guest:
*
* 1. If the guest wishes to fully sync (drain) the FIFO,
* it will write once to SYNC then poll on the BUSY
* register. The FIFO is sync'ed once BUSY is zero.
*
* 2. If the guest wants to asynchronously wake up the host,
* it will write once to SYNC without polling on BUSY.
* Ideally it will do this after some new commands have
* been placed in the FIFO, and after reading a zero
* from SVGA_FIFO_BUSY.
*
* (1) is the original behaviour that SYNC was designed to
* support. Originally, a write to SYNC would implicitly
* trigger a read from BUSY. This causes us to synchronously
* process the FIFO.
*
* This behaviour has since been changed so that writing SYNC
* will *not* implicitly cause a read from BUSY. Instead, it
* makes a channel call which asynchronously wakes up the MKS
* thread.
*
* New guests can use this new behaviour to implement (2)
* efficiently. This lets guests get the host's attention
* without waiting for the MKS to poll, which gives us much
* better CPU utilization on SMP hosts and on UP hosts while
* we're blocked on the host GPU.
*
* Old guests shouldn't notice the behaviour change. SYNC was
* never guaranteed to process the entire FIFO, since it was
* bounded to a particular number of CPU cycles. Old guests will
* still loop on the BUSY register until the FIFO is empty.
*
* Writing to SYNC currently has the following side-effects:
*
* - Sets SVGA_REG_BUSY to TRUE (in the monitor)
* - Asynchronously wakes up the MKS thread for FIFO processing
* - The value written to SYNC is recorded as a "reason", for
* stats purposes.
*
* If SVGA_FIFO_BUSY is available, drivers are advised to only
* write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
* SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
* eventually set SVGA_FIFO_BUSY on its own, but this approach
* lets the driver avoid sending multiple asynchronous wakeup
* messages to the MKS thread.
*
* SVGA_REG_BUSY --
*
* This register is set to TRUE when SVGA_REG_SYNC is written,
* and it reads as FALSE when the FIFO has been completely
* drained.
*
* Every read from this register causes us to synchronously
* process FIFO commands. There is no guarantee as to how many
* commands each read will process.
*
* CPU time spent processing FIFO commands will be billed to
* the guest.
*
* New drivers should avoid using this register unless they
* need to guarantee that the FIFO is completely drained. It
* is overkill for performing a sync-to-fence. Older drivers
* will use this register for any type of synchronization.
*
* SVGA_FIFO_BUSY --
*
* This register is a fast way for the guest driver to check
* whether the FIFO is already being processed. It reads and
* writes at normal RAM speeds, with no monitor intervention.
*
* If this register reads as TRUE, the host is guaranteeing that
* any new commands written into the FIFO will be noticed before
* the MKS goes back to sleep.
*
* If this register reads as FALSE, no such guarantee can be
* made.
*
* The guest should use this register to quickly determine
* whether or not it needs to wake up the host. If the guest
* just wrote a command or group of commands that it would like
* the host to begin processing, it should:
*
* 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
* action is necessary.
*
* 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
* code that we've already sent a SYNC to the host and we
* don't need to send a duplicate.
*
* 3. Write a reason to SVGA_REG_SYNC. This will send an
* asynchronous wakeup to the MKS thread.
*/
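/*
 * Illustrative sketch, not part of the original header: the three-step
 * wakeup sequence described above, using hypothetical accessors
 * svga_read_fifo()/svga_write_fifo() plus the earlier svga_write_reg().
 * The "reason" value written to SYNC is opaque; 1 is used arbitrarily.
 */
extern uint32 svga_read_fifo(uint32 index);               /* hypothetical */
extern void svga_write_fifo(uint32 index, uint32 value);  /* hypothetical */

static inline void svga_ring_doorbell(void)
{
   /* 1. If the host is already processing the FIFO, nothing to do. */
   if (svga_read_fifo(SVGA_FIFO_BUSY))
      return;

   /* 2. Record that a wakeup has been sent, so later commands skip it. */
   svga_write_fifo(SVGA_FIFO_BUSY, 1);

   /* 3. Asynchronously wake up the MKS thread. */
   svga_write_reg(SVGA_REG_SYNC, 1);
}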
/*
* FIFO Capabilities
*
* Fence -- Fence register and command are supported
* Accel Front -- Front buffer only commands are supported
* Pitch Lock -- Pitch lock register is supported
* Video -- SVGA Video overlay units are supported
* Escape -- Escape command is supported
*
* XXX: Add longer descriptions for each capability, including a list
* of the new features that each capability provides.
*
* SVGA_FIFO_CAP_SCREEN_OBJECT --
*
* Provides dynamic multi-screen rendering, for improved Unity and
* multi-monitor modes. With Screen Object, the guest can
* dynamically create and destroy 'screens', which can represent
* Unity windows or virtual monitors. Screen Object also provides
* strong guarantees that DMA operations happen only when
* guest-initiated. Screen Object deprecates the BAR1 guest
* framebuffer (GFB) and all commands that work only with the GFB.
*
* New registers:
* FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
*
* New 2D commands:
* DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
* BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
*
* New 3D commands:
* BLIT_SURFACE_TO_SCREEN
*
* New guarantees:
*
* - The host will not read or write guest memory, including the GFB,
* except when explicitly initiated by a DMA command.
*
* - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
* is guaranteed to complete before any subsequent FENCEs.
*
* - All legacy commands which affect a Screen (UPDATE, PRESENT,
* PRESENT_READBACK) as well as new Screen blit commands will
* all behave consistently as blits, and memory will be read
* or written in FIFO order.
*
* For example, if you PRESENT from one SVGA3D surface to multiple
* places on the screen, the data copied will always be from the
* SVGA3D surface at the time the PRESENT was issued in the FIFO.
* This was not necessarily true on devices without Screen Object.
*
* This means that on devices that support Screen Object, the
* PRESENT_READBACK command should not be necessary unless you
* actually want to read back the results of 3D rendering into
* system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
* command provides a strict superset of functionality.)
*
* - When a screen is resized, either using Screen Object commands or
* legacy multimon registers, its contents are preserved.
*/
#define SVGA_FIFO_CAP_NONE 0
#define SVGA_FIFO_CAP_FENCE (1<<0)
#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
#define SVGA_FIFO_CAP_VIDEO (1<<3)
#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
#define SVGA_FIFO_CAP_ESCAPE (1<<5)
#define SVGA_FIFO_CAP_RESERVE (1<<6)
#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
/*
* FIFO Flags
*
* Accel Front -- Driver should use front buffer only commands
*/
#define SVGA_FIFO_FLAG_NONE 0
#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
/*
* FIFO reservation sentinel value
*/
#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
/*
* Video overlay support
*/
#define SVGA_NUM_OVERLAY_UNITS 32
/*
* Video capabilities that the guest is currently using
*/
#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
/*
* Offsets for the video overlay registers
*/
enum {
   SVGA_VIDEO_ENABLED = 0,
   SVGA_VIDEO_FLAGS,
   SVGA_VIDEO_DATA_OFFSET,
   SVGA_VIDEO_FORMAT,
   SVGA_VIDEO_COLORKEY,
   SVGA_VIDEO_SIZE,            // Deprecated
   SVGA_VIDEO_WIDTH,
   SVGA_VIDEO_HEIGHT,
   SVGA_VIDEO_SRC_X,
   SVGA_VIDEO_SRC_Y,
   SVGA_VIDEO_SRC_WIDTH,
   SVGA_VIDEO_SRC_HEIGHT,
   SVGA_VIDEO_DST_X,           // Signed int32
   SVGA_VIDEO_DST_Y,           // Signed int32
   SVGA_VIDEO_DST_WIDTH,
   SVGA_VIDEO_DST_HEIGHT,
   SVGA_VIDEO_PITCH_1,
   SVGA_VIDEO_PITCH_2,
   SVGA_VIDEO_PITCH_3,
   SVGA_VIDEO_DATA_GMRID,      // Optional, defaults to SVGA_GMR_FRAMEBUFFER
   SVGA_VIDEO_DST_SCREEN_ID,   // Optional, defaults to virtual coords (SVGA_ID_INVALID)
   SVGA_VIDEO_NUM_REGS
};
/*
* SVGA Overlay Units
*
* width and height relate to the entire source video frame.
* srcX, srcY, srcWidth and srcHeight represent subset of the source
* video frame to be displayed.
*/
typedef
struct SVGAOverlayUnit {
   uint32 enabled;
   uint32 flags;
   uint32 dataOffset;
   uint32 format;
   uint32 colorKey;
   uint32 size;
   uint32 width;
   uint32 height;
   uint32 srcX;
   uint32 srcY;
   uint32 srcWidth;
   uint32 srcHeight;
   int32  dstX;
   int32  dstY;
   uint32 dstWidth;
   uint32 dstHeight;
   uint32 pitches[3];
   uint32 dataGMRId;
   uint32 dstScreenId;
} SVGAOverlayUnit;
/*
* SVGAScreenObject --
*
* This is a new way to represent a guest's multi-monitor screen or
* Unity window. Screen objects are only supported if the
* SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
*
* If Screen Objects are supported, they can be used to fully
* replace the functionality provided by the framebuffer registers
* (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
*
* The screen object is a struct with guaranteed binary
* compatibility. New flags can be added, and the struct may grow,
* but existing fields must retain their meaning.
*
*/
#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
typedef
struct SVGAScreenObject {
   uint32 structSize;   // sizeof(SVGAScreenObject)
   uint32 id;
   uint32 flags;
   struct {
      uint32 width;
      uint32 height;
   } size;
   struct {
      int32 x;
      int32 y;
   } root;              // Only used if SVGA_SCREEN_HAS_ROOT is set.
} SVGAScreenObject;
/*
* Commands in the command FIFO:
*
* Command IDs defined below are used for the traditional 2D FIFO
* communication (not all commands are available for all versions of the
* SVGA FIFO protocol).
*
* Note the holes in the command ID numbers: These commands have been
* deprecated, and the old IDs must not be reused.
*
* Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
* protocol.
*
* Each command's parameters are described by the comments and
* structs below.
*/
typedef enum {
   SVGA_CMD_INVALID_CMD          = 0,
   SVGA_CMD_UPDATE               = 1,
   SVGA_CMD_RECT_COPY            = 3,
   SVGA_CMD_DEFINE_CURSOR        = 19,
   SVGA_CMD_DEFINE_ALPHA_CURSOR  = 22,
   SVGA_CMD_UPDATE_VERBOSE       = 25,
   SVGA_CMD_FRONT_ROP_FILL       = 29,
   SVGA_CMD_FENCE                = 30,
   SVGA_CMD_ESCAPE               = 33,
   SVGA_CMD_DEFINE_SCREEN        = 34,
   SVGA_CMD_DESTROY_SCREEN       = 35,
   SVGA_CMD_DEFINE_GMRFB         = 36,
   SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
   SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
   SVGA_CMD_ANNOTATION_FILL      = 39,
   SVGA_CMD_ANNOTATION_COPY      = 40,
   SVGA_CMD_MAX
} SVGAFifoCmdId;
#define SVGA_CMD_MAX_ARGS 64
/*
* SVGA_CMD_UPDATE --
*
* This is a DMA transfer which copies from the Guest Framebuffer
* (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
* intersect with the provided virtual rectangle.
*
* This command does not support using arbitrary guest memory as a
 * data source; it only works with the pre-defined GFB memory.
* This command also does not support signed virtual coordinates.
* If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
* negative root x/y coordinates, the negative portion of those
* screens will not be reachable by this command.
*
* This command is not necessary when using framebuffer
* traces. Traces are automatically enabled if the SVGA FIFO is
* disabled, and you may explicitly enable/disable traces using
* SVGA_REG_TRACES. With traces enabled, any write to the GFB will
* automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
*
* Traces and SVGA_CMD_UPDATE are the only supported ways to render
* pseudocolor screen updates. The newer Screen Object commands
* only support true color formats.
*
* Availability:
* Always available.
*/
typedef
struct {
   uint32 x;
   uint32 y;
   uint32 width;
   uint32 height;
} SVGAFifoCmdUpdate;
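/*
 * Illustrative sketch, not part of the original header: emitting an
 * UPDATE command. svga_fifo_reserve()/svga_fifo_commit() are
 * hypothetical helpers that reserve FIFO space for one command id plus
 * its payload, then submit it.
 */
extern void *svga_fifo_reserve(uint32 cmd_id, uint32 bytes);  /* hypothetical */
extern void svga_fifo_commit(uint32 bytes);                   /* hypothetical */

static inline void svga_update_rect(uint32 x, uint32 y,
                                    uint32 width, uint32 height)
{
   SVGAFifoCmdUpdate *cmd;

   /* Reserve space for the command body, tagged SVGA_CMD_UPDATE. */
   cmd = svga_fifo_reserve(SVGA_CMD_UPDATE, sizeof(*cmd));

   cmd->x = x;
   cmd->y = y;
   cmd->width = width;
   cmd->height = height;

   svga_fifo_commit(sizeof(*cmd));
}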
/*
* SVGA_CMD_RECT_COPY --
*
* Perform a rectangular DMA transfer from one area of the GFB to
* another, and copy the result to any screens which intersect it.
*
* Availability:
* SVGA_CAP_RECT_COPY
*/
typedef
struct {
   uint32 srcX;
   uint32 srcY;
   uint32 destX;
   uint32 destY;
   uint32 width;
   uint32 height;
} SVGAFifoCmdRectCopy;
/*
* SVGA_CMD_DEFINE_CURSOR --
*
* Provide a new cursor image, as an AND/XOR mask.
*
* The recommended way to position the cursor overlay is by using
* the SVGA_FIFO_CURSOR_* registers, supported by the
* SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
*
* Availability:
* SVGA_CAP_CURSOR
*/
typedef
struct {
   uint32 id;             // Reserved, must be zero.
   uint32 hotspotX;
   uint32 hotspotY;
   uint32 width;
   uint32 height;
   uint32 andMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
   uint32 xorMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
   /*
    * Followed by scanline data for AND mask, then XOR mask.
    * Each scanline is padded to a 32-bit boundary.
    */
} SVGAFifoCmdDefineCursor;
/*
* SVGA_CMD_DEFINE_ALPHA_CURSOR --
*
* Provide a new cursor image, in 32-bit BGRA format.
*
* The recommended way to position the cursor overlay is by using
* the SVGA_FIFO_CURSOR_* registers, supported by the
* SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
*
* Availability:
* SVGA_CAP_ALPHA_CURSOR
*/
typedef
struct {
   uint32 id;             // Reserved, must be zero.
   uint32 hotspotX;
   uint32 hotspotY;
   uint32 width;
   uint32 height;
   /* Followed by scanline data */
} SVGAFifoCmdDefineAlphaCursor;
/*
* SVGA_CMD_UPDATE_VERBOSE --
*
* Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
* 'reason' value, an opaque cookie which is used by internal
* debugging tools. Third party drivers should not use this
* command.
*
* Availability:
* SVGA_CAP_EXTENDED_FIFO
*/
typedef
struct {
   uint32 x;
   uint32 y;
   uint32 width;
   uint32 height;
   uint32 reason;
} SVGAFifoCmdUpdateVerbose;
/*
* SVGA_CMD_FRONT_ROP_FILL --
*
* This is a hint which tells the SVGA device that the driver has
* just filled a rectangular region of the GFB with a solid
* color. Instead of reading these pixels from the GFB, the device
* can assume that they all equal 'color'. This is primarily used
* for remote desktop protocols.
*
* Availability:
* SVGA_FIFO_CAP_ACCELFRONT
*/
#define SVGA_ROP_COPY 0x03
typedef
struct {
   uint32 color;     // In the same format as the GFB
   uint32 x;
   uint32 y;
   uint32 width;
   uint32 height;
   uint32 rop;       // Must be SVGA_ROP_COPY
} SVGAFifoCmdFrontRopFill;
/*
* SVGA_CMD_FENCE --
*
* Insert a synchronization fence. When the SVGA device reaches
* this command, it will copy the 'fence' value into the
* SVGA_FIFO_FENCE register. It will also compare the fence against
* SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
* SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
* raise this interrupt.
*
* Availability:
* SVGA_FIFO_FENCE for this command,
* SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
*/
typedef
struct {
   uint32 fence;
} SVGAFifoCmdFence;
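/*
 * Illustrative sketch, not part of the original header: inserting a
 * fence and spinning until the device has processed everything before
 * it, using the same hypothetical svga_fifo_reserve()/svga_fifo_commit()
 * and svga_read_fifo() helpers as the sketches above.
 */
static inline void svga_insert_and_wait_fence(uint32 fence)
{
   SVGAFifoCmdFence *cmd;

   cmd = svga_fifo_reserve(SVGA_CMD_FENCE, sizeof(*cmd));
   cmd->fence = fence;
   svga_fifo_commit(sizeof(*cmd));

   /* The device copies 'fence' into SVGA_FIFO_FENCE when it gets there. */
   while (svga_read_fifo(SVGA_FIFO_FENCE) != fence)
      ;   /* A real driver would sleep on the FENCE_GOAL interrupt instead. */
}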
/*
* SVGA_CMD_ESCAPE --
*
* Send an extended or vendor-specific variable length command.
* This is used for video overlay, third party plugins, and
* internal debugging tools. See svga_escape.h
*
* Availability:
* SVGA_FIFO_CAP_ESCAPE
*/
typedef
struct {
   uint32 nsid;
   uint32 size;
   /* followed by 'size' bytes of data */
} SVGAFifoCmdEscape;
/*
* SVGA_CMD_DEFINE_SCREEN --
*
* Define or redefine an SVGAScreenObject. See the description of
* SVGAScreenObject above. The video driver is responsible for
* generating new screen IDs. They should be small positive
* integers. The virtual device will have an implementation
* specific upper limit on the number of screen IDs
* supported. Drivers are responsible for recycling IDs. The first
* valid ID is zero.
*
* - Interaction with other registers:
*
* For backwards compatibility, when the GFB mode registers (WIDTH,
* HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
* deletes all screens other than screen #0, and redefines screen
* #0 according to the specified mode. Drivers that use
* SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
*
* If you use screen objects, do not use the legacy multi-mon
* registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
   SVGAScreenObject screen;   // Variable-length according to version
} SVGAFifoCmdDefineScreen;
/*
* SVGA_CMD_DESTROY_SCREEN --
*
* Destroy an SVGAScreenObject. Its ID is immediately available for
* re-use.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
   uint32 screenId;
} SVGAFifoCmdDestroyScreen;
/*
* SVGA_CMD_DEFINE_GMRFB --
*
* This command sets a piece of SVGA device state called the
* Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
* piece of light-weight state which identifies the location and
* format of an image in guest memory or in BAR1. The GMRFB has
* an arbitrary size, and it doesn't need to match the geometry
* of the GFB or any screen object.
*
* The GMRFB can be redefined as often as you like. You could
* always use the same GMRFB, you could redefine it before
* rendering from a different guest screen, or you could even
* redefine it before every blit.
*
* There are multiple ways to use this command. The simplest way is
* to use it to move the framebuffer either to elsewhere in the GFB
* (BAR1) memory region, or to a user-defined GMR. This lets a
* driver use a framebuffer allocated entirely out of normal system
* memory, which we encourage.
*
* Another way to use this command is to set up a ring buffer of
* updates in GFB memory. If a driver wants to ensure that no
* frames are skipped by the SVGA device, it is important that the
* driver not modify the source data for a blit until the device is
* done processing the command. One efficient way to accomplish
* this is to use a ring of small DMA buffers. Each buffer is used
* for one blit, then we move on to the next buffer in the
* ring. The FENCE mechanism is used to protect each buffer from
* re-use until the device is finished with that buffer's
* corresponding blit.
*
* This command does not affect the meaning of SVGA_CMD_UPDATE.
* UPDATEs always occur from the legacy GFB memory area. This
* command has no support for pseudocolor GMRFBs. Currently only
* true-color 15, 16, and 24-bit depths are supported. Future
* devices may expose capabilities for additional framebuffer
* formats.
*
* The default GMRFB value is undefined. Drivers must always send
* this command at least once before performing any blit from the
* GMRFB.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
   SVGAGuestPtr       ptr;
   uint32             bytesPerLine;
   SVGAGMRImageFormat format;
} SVGAFifoCmdDefineGMRFB;
/*
* SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
*
* This is a guest-to-host blit. It performs a DMA operation to
* copy a rectangular region of pixels from the current GMRFB to
* one or more Screen Objects.
*
* The destination coordinate may be specified relative to a
* screen's origin (if a screen ID is specified) or relative to the
* virtual coordinate system's origin (if the screen ID is
* SVGA_ID_INVALID). The actual destination may span zero or more
* screens, in the case of a virtual destination rect or a rect
* which extends off the edge of the specified screen.
*
* This command writes to the screen's "base layer": the underlying
* framebuffer which exists below any cursor or video overlays. No
* action is necessary to explicitly hide or update any overlays
* which exist on top of the updated region.
*
* The SVGA device is guaranteed to finish reading from the GMRFB
* by the time any subsequent FENCE commands are reached.
*
* This command consumes an annotation. See the
* SVGA_CMD_ANNOTATION_* commands for details.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
   SVGASignedPoint srcOrigin;
   SVGASignedRect  destRect;
   uint32          destScreenId;
} SVGAFifoCmdBlitGMRFBToScreen;
/*
* SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
*
* This is a host-to-guest blit. It performs a DMA operation to
* copy a rectangular region of pixels from a single Screen Object
* back to the current GMRFB.
*
* Usage note: This command should be used rarely. It will
* typically be inefficient, but it is necessary for some types of
* synchronization between 3D (GPU) and 2D (CPU) rendering into
* overlapping areas of a screen.
*
* The source coordinate is specified relative to a screen's
* origin. The provided screen ID must be valid. If any parameters
* are invalid, the resulting pixel values are undefined.
*
* This command reads the screen's "base layer". Overlays like
* video and cursor are not included, but any data which was sent
* using a blit-to-screen primitive will be available, no matter
* whether the data's original source was the GMRFB or the 3D
* acceleration hardware.
*
* Note that our guest-to-host blits and host-to-guest blits aren't
* symmetric in their current implementation. While the parameters
* are identical, host-to-guest blits are a lot less featureful.
* They do not support clipping: If the source parameters don't
* fully fit within a screen, the blit fails. They must originate
* from exactly one screen. Virtual coordinates are not directly
* supported.
*
* Host-to-guest blits do support the same set of GMRFB formats
* offered by guest-to-host blits.
*
* The SVGA device is guaranteed to finish writing to the GMRFB by
* the time any subsequent FENCE commands are reached.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
   SVGASignedPoint destOrigin;
   SVGASignedRect  srcRect;
   uint32          srcScreenId;
} SVGAFifoCmdBlitScreenToGMRFB;
/*
* SVGA_CMD_ANNOTATION_FILL --
*
* This is a blit annotation. This command stores a small piece of
* device state which is consumed by the next blit-to-screen
* command. The state is only cleared by commands which are
* specifically documented as consuming an annotation. Other
* commands (such as ESCAPEs for debugging) may intervene between
* the annotation and its associated blit.
*
* This annotation is a promise about the contents of the next
* blit: The video driver is guaranteeing that all pixels in that
* blit will have the same value, specified here as a color in
* SVGAColorBGRX format.
*
* The SVGA device can still render the blit correctly even if it
* ignores this annotation, but the annotation may allow it to
* perform the blit more efficiently, for example by ignoring the
* source data and performing a fill in hardware.
*
* This annotation is most important for performance when the
* user's display is being remoted over a network connection.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
   SVGAColorBGRX color;
} SVGAFifoCmdAnnotationFill;
/*
* SVGA_CMD_ANNOTATION_COPY --
*
* This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
* information about annotations.
*
* This annotation is a promise about the contents of the next
* blit: The video driver is guaranteeing that all pixels in that
* blit will have the same value as those which already exist at an
* identically-sized region on the same or a different screen.
*
* Note that the source pixels for the COPY in this annotation are
 * sampled before applying the annotation's associated blit. They
* are allowed to overlap with the blit's destination pixels.
*
* The copy source rectangle is specified the same way as the blit
* destination: it can be a rectangle which spans zero or more
* screens, specified relative to either a screen or to the virtual
* coordinate system's origin. If the source rectangle includes
* pixels which are not from exactly one screen, the results are
* undefined.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
   SVGASignedPoint srcOrigin;
   uint32          srcScreenId;
} SVGAFifoCmdAnnotationCopy;

#endif
drivers/gpu/drm/vmwgfx/svga_types.h
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* Silly typedefs for the svga headers. Currently the headers are shared
 * between all components that talk to svga. As such, the headers are
 * in a completely different style and use weird defines.
*
* This file lets all the ugly be prefixed with svga*.
*/
#ifndef _SVGA_TYPES_H_
#define _SVGA_TYPES_H_
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint8_t uint8;
typedef int32_t int32;
typedef bool Bool;
#endif
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct vmw_ttm_backend {
	struct ttm_backend backend;
};

static int vmw_ttm_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page)
{
	return 0;
}

static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
	return 0;
}

static int vmw_ttm_unbind(struct ttm_backend *backend)
{
	return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
{
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
	struct vmw_ttm_backend *vmw_be =
	    container_of(backend, struct vmw_ttm_backend, backend);

	kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
	.populate = vmw_ttm_populate,
	.clear = vmw_ttm_clear,
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
	struct vmw_ttm_backend *vmw_be;

	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->backend.func = &vmw_ttm_func;

	return &vmw_be->backend;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	struct vmw_private *dev_priv =
	    container_of(bdev, struct vmw_private, bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->io_offset = dev_priv->vram_start;
		man->io_size = dev_priv->vram_size;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
		    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->io_addr = NULL;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
/**
* FIXME: Proper access checks on buffers.
*/
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
*/
static void *vmw_sync_obj_ref(void *sync_obj)
{
	return sync_obj;
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	*sync_obj = NULL;
}

static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	mutex_unlock(&dev_priv->hw_mutex);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_fence_signaled(dev_priv, sequence);
}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
			     bool lazy, bool interruptible)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_wait_fence(dev_priv, false, sequence, false, 3 * HZ);
}

struct ttm_bo_driver vmw_bo_driver = {
	.create_ttm_backend_entry = vmw_ttm_backend_init,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref
};
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
/**
* Fully encoded drm commands. Might move to vmw_drm.h
*/
#define DRM_IOCTL_VMW_GET_PARAM \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
struct drm_vmw_cursor_bypass_arg)
#define DRM_IOCTL_VMW_CONTROL_STREAM \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_CREATE_CONTEXT \
DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_FIFO_DEBUG \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
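These fully encoded numbers are what user space passes to ioctl(2) on the DRM device node. A hedged user-space sketch of querying one parameter (it assumes the DRM_IOCTL_VMW_GET_PARAM encoding above is visible to the caller, that the `drm_vmw_getparam_arg` layout and a `DRM_VMW_PARAM_3D` selector come from vmwgfx_drm.h, and that the device node is /dev/dri/card0):

```c
/*
 * Illustrative user-space sketch, not part of the driver.
 * Assumes the ioctl encoding and arg struct described above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "vmwgfx_drm.h"	/* struct drm_vmw_getparam_arg, DRM_VMW_PARAM_* */

int demo_get_param(void)
{
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0)
		printf("3D support: %llu\n", (unsigned long long)arg.value);
	close(fd);
	return 0;
}
```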
/**
* The core DRM version of this macro doesn't account for
* DRM_COMMAND_BASE.
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
/**
* Ioctl definitions.
*/
static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, 0)
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
static char *vmw_devname = "vmwgfx";

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
}
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	vmw_kms_save_vga(dev_priv);

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}

	return 0;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	vmw_kms_restore_vga(dev_priv);
}
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	ida_init(&dev_priv->gmr_ida);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	atomic_set(&dev_priv->fence_queue_waiters, 0);
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->gmr_lru);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	if (!dev->devname)
		dev->devname = vmw_devname;

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any. Entering stealth mode.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
		vmw_kms_init(dev_priv);
		vmw_overlay_init(dev_priv);
	} else {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			goto out_no_device;
		vmw_kms_init(dev_priv);
		vmw_overlay_init(dev_priv);
		vmw_fb_init(dev_priv);
	}

	return 0;

out_no_device:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");

	if (!dev_priv->stealth) {
		vmw_fb_close(dev_priv);
		vmw_kms_close(dev_priv);
		vmw_overlay_close(dev_priv);
		vmw_release_device(dev_priv);
		pci_release_regions(dev->pdev);
	} else {
		vmw_kms_close(dev_priv);
		vmw_overlay_close(dev_priv);
		pci_release_region(dev->pdev, 2);
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	long ret;

	/*
	 * The driver private ioctls and TTM ioctls should be
	 * thread-safe.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		return drm_ioctl(filp->f_path.dentry->d_inode,
				 filp, cmd, arg);
	}

	/*
	 * Not all old drm ioctls are thread-safe.
	 */

	lock_kernel();
	ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
	unlock_kernel();
	return ret;
}
static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	DRM_INFO("Master create.\n");
	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	ttm_lock_init(&vmaster->lock);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	DRM_INFO("Master destroy.\n");
	master->driver_priv = NULL;
	kfree(vmaster);
}
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	DRM_INFO("Master set.\n");
	if (dev_priv->stealth) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			return ret;
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	vmw_release_device(dev_priv);
	return ret;
}
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	DRM_INFO("Master drop.\n");

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);

	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (dev_priv->stealth) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_release_device(dev_priv);
	}
	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (!dev_priv->stealth)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.reclaim_buffers_locked = NULL,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = vmw_unlocked_ioctl,
		.mmap = vmw_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = drm_compat_ioctl,
#endif
	},
	.pci_driver = {
		.name = VMWGFX_DRIVER_NAME,
		.id_table = vmw_pci_id_list,
		.probe = vmw_probe,
		.remove = vmw_remove
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_init(&driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_exit(&driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
#include "vmwgfx_reg.h"
#include "drmP.h"
#include "vmwgfx_drm.h"
#include "drm_hashtab.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
#define VMWGFX_DRIVER_DATE "20090724"
#define VMWGFX_DRIVER_MAJOR 0
#define VMWGFX_DRIVER_MINOR 1
#define VMWGFX_DRIVER_PATCHLEVEL 2
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048
struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head validate_list;
	struct list_head gmr_lru;
	uint32_t gmr_id;
	bool gmr_bound;
	uint32_t cur_validate_node;
	bool on_validate_list;
};

struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	struct idr *idr;
	int id;
	enum ttm_object_type res_type;
	bool avail;
	void (*hw_destroy) (struct vmw_resource *res);
	void (*res_free) (struct vmw_resource *res);

	/* TODO is a generic snooper needed? */
#if 0
	void (*snoop)(struct vmw_resource *res,
		      struct ttm_object_file *tfile,
		      SVGA3dCmdHeader *header);
	void *snoop_priv;
#endif
};

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;

	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	__le32 *last_buffer;
	uint32_t last_data_size;
	uint32_t last_buffer_size;
	bool last_buffer_add;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct rw_semaphore rwsem;
};

struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};

struct vmw_sw_context {
	struct ida bo_list;
	uint32_t last_cid;
	bool cid_valid;
	uint32_t last_sid;
	bool sid_valid;
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
	uint32_t cur_val_buf;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct ttm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_depth;
	uint32_t vga_bpp;
	uint32_t vga_pseudo;
	uint32_t vga_red_mask;
	uint32_t vga_blue_mask;
	uint32_t vga_green_mask;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr context_idr;
	struct idr surface_idr;
	struct idr stream_idr;

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	uint32_t fence_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	atomic_t fence_queue_waiters;
	atomic_t fifo_queue_waiters;
	uint32_t last_read_sequence;
	spinlock_t irq_lock;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	uint32_t val_seq;
	struct mutex cmdbuf_mutex;

	/**
	 * GMR management. Protected by the lru spinlock.
	 */

	struct ida gmr_ida;
	struct list_head gmr_lru;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool is_opened;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
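vmw_read() and vmw_write() implement the SVGA index/value port pair: the register index goes out through the index port, then the data is moved through the value port. Because that is a two-step sequence, callers in this commit serialize it with hw_mutex. A minimal sketch of the pattern, assuming the declarations in this header (demo_read_caps() is a hypothetical helper, not driver code; vmw_driver_load() does the same thing inline):

```c
/*
 * Sketch: hw_mutex prevents two threads from interleaving their
 * index-port/value-port accesses.
 */
static uint32_t demo_read_caps(struct vmw_private *dev_priv)
{
	uint32_t caps;

	mutex_lock(&dev_priv->hw_mutex);
	caps = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	mutex_unlock(&dev_priv->hw_mutex);
	return caps;
}
```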
/**
* GMR utilities - vmwgfx_gmr.c
*/
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			struct ttm_buffer_object *bo);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res));
extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   int sid, struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interuptable,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
			  uint32_t sequence, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
			       uint32_t sequence);
extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t sequence,
			     bool interruptible,
			     unsigned long timeout);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;
	struct ttm_buffer_object *bo = &tmp_buf->base;
	*buf = NULL;

	ttm_bo_unref(&bo);
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

#endif
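The unreference helpers above deliberately clear the caller's pointer before dropping the reference, so a stale pointer cannot be used after the object may have been freed. A small hypothetical usage sketch (demo_use_surface() is not driver code; it assumes the declarations in this header):

```c
/*
 * Sketch: typical take/drop pairing with the inline helpers above.
 * vmw_surface_unreference() NULLs the caller's pointer, which is why
 * other code in this commit can test such pointers against NULL.
 */
static void demo_use_surface(struct vmw_surface *srf)
{
	struct vmw_surface *local = vmw_surface_reference(srf);

	/* ... operate on local ... */

	vmw_surface_unreference(&local);	/* local is now NULL */
}
```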
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return 0;
}

static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t sid)
{
	if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
		     sid != SVGA3D_INVALID_ID)) {
		int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);

		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find or use surface %u\n",
				  (unsigned) sid);
			return ret;
		}

		sw_context->last_sid = sid;
		sw_context->sid_valid = true;
	}
	return 0;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	uint32_t handle;
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	struct vmw_relocation *reloc;
	int ret;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
	if (unlikely(ret != 0))
		return ret;

	handle = cmd->dma.guest.ptr.gmrId;
	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of DMA commands per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = &cmd->dma.guest.ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo,
						     sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}

	ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
				      cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);
#define VMW_CMD_DEF(cmd, func) \
[cmd - SVGA_3D_CMD_BASE] = func
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		reloc->location->offset += bo->offset;
		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
		return 0;

	ret = vmw_gmr_bind(dev_priv, bo);
	if (likely(ret == 0 || ret == -ERESTART))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTART;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
				     dev_priv->val_seq++);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);
	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */

	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
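From user space, the ioctl above takes a pointer to raw SVGA3D commands plus a fence_rep pointer the kernel fills in. A hedged sketch of the call sequence (the field names mirror those the kernel side dereferences: commands, command_size, fence_rep; it assumes the DRM_IOCTL_VMW_EXECBUF encoding from the defines earlier in this commit and the struct layouts from vmwgfx_drm.h are visible to the caller):

```c
/*
 * Illustrative user-space sketch, not part of the driver.  The caller
 * is assumed to already hold an open DRM fd and a filled command
 * buffer (cmd_buf, cmd_size).
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include "vmwgfx_drm.h"

int demo_submit(int fd, void *cmd_buf, uint32_t cmd_size)
{
	struct drm_vmw_fence_rep rep = { .error = -1 };
	struct drm_vmw_execbuf_arg arg = {
		.commands = (uint64_t)(unsigned long)cmd_buf,
		.command_size = cmd_size,
		.fence_rep = (uint64_t)(unsigned long)&rep,
	};

	if (ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg))
		return -1;
	/* rep.error left untouched means the kernel's copy_to_user failed. */
	return rep.error;
}
```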
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
0 → 100644
/**************************************************************************
*
* Copyright © 2007 David Airlie
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#define VMW_DIRTY_DELAY (HZ / 30)
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	/* without multimon it's hard to resize */
	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
	    (var->xres != par->max_width ||
	     var->yres != par->max_height)) {
		DRM_ERROR("Tried to resize, but we don't have multimon\n");
		return -EINVAL;
	}

	if (var->xres > par->max_width ||
	    var->yres > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

		/* TODO check if pitch and offset changes */

		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X,
			  info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y,
			  info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	} else {
		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);

		/* TODO check if pitch and offset changes */
	}

	return 0;
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
/*
* Dirty code
*/
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i + x; k < i + x + w && k < info->fix.smem_len / 4;
		     k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
/*
* Draw code
*/
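/*
 * The acceleration hooks below all follow the same pattern: perform
 * the operation in the shadow buffer with the generic cfb_* helper,
 * then mark the touched rectangle dirty so it is flushed to the
 * device.
 */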
static void vmw_fb_fillrect(struct fb_info *info,
			    const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info,
			    const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info,
			     const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
/*
* Bring up code
*/
static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
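/*
 * Allocate the buffer object backing the fbdev framebuffer. Limiting
 * lpfn in the placement confines the buffer to the first "size" bytes
 * of VRAM; vmw_fb_on() later relies on the buffer sitting at the
 * start of VRAM.
 */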
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	initial_width = 800;
	initial_height = 600;

	fb_bbp = 32;
	fb_depth = 24;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
	} else {
		fb_width = min(vmw_priv->fb_max_width, initial_width);
		fb_height = min(vmw_priv->fb_max_height, initial_height);
	}

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);

	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
	DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
	DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
	DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
	DRM_DEBUG("fb_pitch %u\n", fb_pitch);
	DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bbp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24-bit depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;
	return ret;
}
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
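/*
 * Helpers for moving the fbdev buffer object in and out of VRAM.
 * vmw_dmabuf_from_vram() evicts the buffer to system memory;
 * vmw_dmabuf_to_start_of_vram() pins it at the start of VRAM, first
 * releasing any GMR binding the buffer may still hold.
 */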
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
		spin_lock(&bo->glob->lru_lock);
		ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&bo->glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}

	ret = ttm_bo_validate(bo, &ne_placement, false, false);
	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was already dirty we won't schedule new work,
	 * so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_placement.h"
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;
	int ret;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->last_data_size = 0;
	fifo->last_buffer_add = false;
	fifo->last_buffer = vmalloc(fifo->last_buffer_size);
	if (unlikely(fifo->last_buffer == NULL)) {
		ret = -ENOMEM;
		goto out_err;
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	dev_priv->fence_seq = (uint32_t) -100;
	dev_priv->last_read_sequence = (uint32_t) -100;
	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

	return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
	vfree(fifo->static_buffer);
	fifo->static_buffer = NULL;
	return ret;
}
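/*
 * Wake the host to process FIFO commands. SVGA_FIFO_BUSY acts as a
 * latch: the sync register is only written while the host is not
 * already known to be busy, so repeated pings stay cheap.
 */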
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);

	mutex_unlock(&dev_priv->hw_mutex);

	if (likely(fifo->last_buffer != NULL)) {
		vfree(fifo->last_buffer);
		fifo->last_buffer = NULL;
	}

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}
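/*
 * Free space in the ring is computed as (max - next_cmd) + (stop - min),
 * i.e. the room between the producer pointer and the top of the ring
 * plus the room between the bottom and the consumer pointer; the FIFO
 * counts as full once that is no larger than the requested size.
 */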
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == -ERESTARTSYS))
		ret = -ERESTART;
	else if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
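/*
 * Reserve "bytes" of FIFO command space. Three outcomes are possible:
 * the caller gets a pointer directly into the FIFO (in-place
 * reservation, requires SVGA_FIFO_CAP_RESERVE or a single-dword
 * command), a pointer to the preallocated static bounce buffer, or a
 * pointer to a freshly vmalloc'd dynamic bounce buffer for large
 * commands. Bounced data is copied into the ring by vmw_fifo_commit().
 * On failure the FIFO rwsem is dropped and NULL is returned; on
 * success it is held until commit.
 */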
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	down_write(&fifo_state->rwsem);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	up_write(&fifo_state->rwsem);
	return NULL;
}
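/*
 * The two copy-back paths for bounced commands: vmw_fifo_res_copy()
 * uses SVGA_FIFO_RESERVED plus memcpy_toio() and handles a ring wrap
 * with at most two chunks, while vmw_fifo_slow_copy() dribbles the
 * data in one dword at a time, advancing SVGA_FIFO_NEXT_CMD after
 * every write so the host can consume concurrently.
 */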
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	up_write(&fifo_state->rwsem);
}
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		down_write(&fifo_state->rwsem);
		*sequence = dev_priv->fence_seq;
		up_write(&fifo_state->rwsem);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
					false, 3 * HZ);
		goto out_err;
	}

	do {
		*sequence = dev_priv->fence_seq++;
	} while (*sequence == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*sequence, &cmd_fence->fence);
	fifo_state->last_buffer_add = true;
	vmw_fifo_commit(dev_priv, bytes);
	fifo_state->last_buffer_add = false;

out_err:
	return ret;
}
/**
* Map the first page of the FIFO read-only to user-space.
*/
static int vmw_fifo_vm_fault(struct vm_area_struct *vma,
			     struct vm_fault *vmf)
{
	int ret;
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (address != vma->vm_start)
		return VM_FAULT_SIGBUS;

	ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
	if (likely(ret == -EBUSY || ret == 0))
		return VM_FAULT_NOPAGE;
	else if (ret == -ENOMEM)
		return VM_FAULT_OOM;

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct vmw_fifo_vm_ops = {
	.fault = vmw_fifo_vm_fault,
	.open = NULL,
	.close = NULL
};
int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);

	if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
	    (vma->vm_end - vma->vm_start) != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
					vma->vm_page_prot);
	vma->vm_ops = &vmw_fifo_vm_ops;
	return 0;
}
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
*/
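/*
 * A GMR is described to the device as a chain of pages holding
 * svga_guest_mem_descriptor entries. Each descriptor covers one run of
 * physically contiguous pages (ppn, num_pages); a terminating
 * descriptor with num_pages == 0 either ends the chain (ppn == 0) or
 * points to the next descriptor page.
 */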
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
				     struct page *pages[],
				     unsigned long num_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;

	desc_per_page = PAGE_SIZE /
	    sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);

		/*
		 * Point previous page terminating descriptor to this
		 * page before unmapping it.
		 */

		if (likely(page_virtual != NULL)) {
			desc_virtual->ppn = page_to_pfn(page);
			kunmap_atomic(page_virtual, KM_USER0);
		}

		page_virtual = kmap_atomic(page, KM_USER0);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = page_to_pfn(*pages);

			if (pfn != prev_pfn + 1) {

				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
				    le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			++pages;
		}

		(++desc_virtual)->ppn = cpu_to_le32(0);
		desc_virtual->num_pages = cpu_to_le32(0);
	}

	if (likely(page_virtual != NULL))
		kunmap_atomic(page_virtual, KM_USER0);

	return 0;
out_err:
	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return ret;
}
static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
}
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, struct list_head *desc_pages)
{
	struct page *page;

	if (unlikely(list_empty(desc_pages)))
		return;

	page = list_entry(desc_pages->next, struct page, lru);

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
	mb();

	mutex_unlock(&dev_priv->hw_mutex);
}
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
*/
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
					       unsigned long num_pages)
{
	unsigned long prev_pfn = ~(0UL);
	unsigned long pfn;
	unsigned long descriptors = 0;

	while (num_pages--) {
		pfn = page_to_pfn(*pages++);
		if (prev_pfn + 1 != pfn)
			++descriptors;
		prev_pfn = pfn;
	}

	return descriptors;
}
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 struct ttm_buffer_object *bo)
{
	struct ttm_tt *ttm = bo->ttm;
	unsigned long descriptors;
	int ret;
	uint32_t id;
	struct list_head desc_pages;

	if (!(dev_priv->capabilities & SVGA_CAP_GMR))
		return -EINVAL;

	ret = ttm_tt_populate(ttm);
	if (unlikely(ret != 0))
		return ret;

	descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
	if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);
	ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
					ttm->num_pages);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_gmr_id_alloc(dev_priv, &id);
	if (unlikely(ret != 0))
		goto out_no_id;

	vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
	vmw_gmr_free_descriptors(&desc_pages);
	vmw_dmabuf_set_gmr(bo, id);
	return 0;

out_no_id:
	vmw_gmr_free_descriptors(&desc_pages);
	return ret;
}
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
		break;
	case DRM_VMW_PARAM_FIFO_OFFSET:
		param->value = dev_priv->mmio_start;
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}
int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct drm_vmw_fifo_debug_arg *arg =
	    (struct drm_vmw_fifo_debug_arg *)data;
	__le32 __user *buffer = (__le32 __user *)
	    (unsigned long)arg->debug_buffer;

	if (unlikely(fifo_state->last_buffer == NULL))
		return -EINVAL;

	if (arg->debug_buffer_size < fifo_state->last_data_size) {
		arg->used_size = arg->debug_buffer_size;
		arg->did_not_fit = 1;
	} else {
		arg->used_size = fifo_state->last_data_size;
		arg->did_not_fit = 0;
	}
	return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size);
}
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 24)
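/*
 * Fence sequence numbers are compared with unsigned arithmetic: a
 * fence is treated as signaled when (last_read_sequence - sequence)
 * is less than VMW_FENCE_WRAP, which keeps the comparison correct
 * across 32-bit wrap-around as long as no more than about 2^24
 * fences are outstanding at once.
 */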
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	spin_unlock(&dev_priv->irq_lock);

	if (status & SVGA_IRQFLAG_ANY_FENCE)
		wake_up_all(&dev_priv->fence_queue);
	if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (likely(status)) {
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
{
	uint32_t busy;

	mutex_lock(&dev_priv->hw_mutex);
	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
	mutex_unlock(&dev_priv->hw_mutex);

	return (busy == 0);
}
bool vmw_fence_signaled(struct vmw_private *dev_priv,
			uint32_t sequence)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, sequence))
		return true;

	/**
	 * Below is to signal stale fences that have wrapped.
	 * First, block fence submission.
	 */

	down_read(&fifo_state->rwsem);

	/**
	 * Then check if the sequence is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
	up_read(&fifo_state->rwsem);

	return ret;
}
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t sequence,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
	    &vmw_fence_signaled;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = dev_priv->fence_seq;
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, sequence))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
int vmw_wait_fence(struct vmw_private *dev_priv,
		   bool lazy, uint32_t sequence,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	unsigned long irq_flags;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_fence_signaled(dev_priv, sequence)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
					 interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);

	if (unlikely(ret == -ERESTARTSYS))
		ret = -ERESTART;
	else if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
#define VMW_FENCE_WAIT_TIMEOUT 3*HZ

int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
	}

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
		return -EBUSY;

	timeout = (unsigned long)arg->kernel_cookie - timeout;
	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence,
			      true, timeout);
}
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_kms.h"
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
* Display Unit Cursor functions
*/
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
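/*
 * Set the cursor image for a display unit. The handle may name either
 * a surface (which must have a cursor snooper image) or a dma buffer;
 * a zero handle hides the cursor. The image itself is always
 * 64x64 ARGB.
 */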
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup(dev_priv, tfile,
					      handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non page aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy (size != 64) */
		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* We can't call the cursor update function from here, since
	 * execbuf has reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
/*
* Generic framebuffer code
*/
int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}
/*
* Surface framebuffer code
*/
#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
};
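/*
 * When the device FIFO lacks SVGA_FIFO_CAP_SCREEN_OBJECT, dirty
 * updates on a surface-backed framebuffer cannot be presented
 * per-clip; instead present_fs is set and the delayed work issues a
 * rate-limited full-screen SVGA_3D_CMD_PRESENT (see
 * vmw_framebuffer_surface_dirty() below).
 */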
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfb =
		vmw_framebuffer_to_vfbs(framebuffer);

	cancel_delayed_work_sync(&vfb->d_work);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfb->surface);

	kfree(framebuffer);
}
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/**
	 * Will not re-add if already pending.
	 */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/**
			 * No work was pending; force an immediate present.
			 */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) +
			       (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) +
				       num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) +
			(num_clips - 1) * sizeof(cmd->cr));

	return 0;
}
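/*
 * Illustrative sketch, not part of the commit: with two clip rects the
 * reserve/commit size above works out to sizeof(*cmd) + 1 * sizeof(cmd->cr),
 * i.e. the FIFO packet is laid out as:
 *
 *	SVGA3dCmdHeader  header;   id = SVGA_3D_CMD_PRESENT,
 *	                           size = sizeof(body) + 2 * sizeof(cr)
 *	SVGA3dCmdPresent body;     sid = surface id
 *	SVGA3dCopyRect   cr[2];    one copy rect per clip
 */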
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
				    struct vmw_surface *surface,
				    struct vmw_framebuffer **out,
				    unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	int ret;

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = 32;
	vfbs->base.base.pitch = width * 32 / 8;
	vfbs->base.base.depth = 24;
	vfbs->base.base.width = width;
	vfbs->base.base.height = height;
	vfbs->base.pin = NULL;
	vfbs->base.unpin = NULL;
	vfbs->surface = surface;
	mutex_init(&vfbs->work_lock);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}
/*
* Dmabuf framebuffer code
*/
#define vmw_framebuffer_to_vfbd(x) \
container_of(x, struct vmw_framebuffer_dmabuf, base.base)
struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};
void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

	return 0;
}
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
	} else
		WARN_ON(true);

	vmw_overlay_resume_all(dev_priv);

	return 0;
}
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *dmabuf,
				   struct vmw_framebuffer **out,
				   unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	int ret;

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbd->base.base.bits_per_pixel = 32;
	vfbd->base.base.pitch = width * 32 / 8;
	vfbd->base.base.depth = 24;
	vfbd->base.base.width = width;
	vfbd->base.base.height = height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}
/*
* Generic Kernel modesetting functions
*/
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_surface_lookup(dev_priv, tfile,
				      mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
					      mode_cmd->width, mode_cmd->height);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return NULL;
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd->width, mode_cmd->height);

	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}

	return &vfb->base;
}
static int vmw_kms_fb_changed(struct drm_device *dev)
{
	return 0;
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.fb_changed = vmw_kms_fb_changed,
};
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 640;
	dev->mode_config.min_height = 480;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);

			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	/*
	 * Set up a single multimon monitor with a size of 0x0;
	 * this stops the UI from resizing when we
	 * change the framebuffer size.
	 */
	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);

	return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);

	/* TODO check for multimon */
	vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);

	return 0;
}
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
0 → 100644
View file @
cbc8cc04
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
#include "drmP.h"
#include "vmwgfx_drv.h"
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
/**
 * Base class for framebuffers
 *
 * @pin is called whenever a crtc uses this framebuffer
 * @unpin is called when the framebuffer is no longer used by any crtc
 */
struct vmw_framebuffer {
	struct drm_framebuffer base;
	int (*pin)(struct vmw_framebuffer *fb);
	int (*unpin)(struct vmw_framebuffer *fb);
};
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
/*
* Basic cursor manipulation
*/
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y);
/**
 * Base class display unit.
 *
 * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
 * the display unit is all of them at the same time. This is true for both
 * legacy multimon and screen objects.
 */
struct vmw_display_unit {
	struct drm_crtc crtc;
	struct drm_encoder encoder;
	struct drm_connector connector;

	struct vmw_surface *cursor_surface;
	struct vmw_dma_buffer *cursor_dmabuf;
	size_t cursor_age;

	int cursor_x;
	int cursor_y;

	int hotspot_x;
	int hotspot_y;

	unsigned unit;
};
/*
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);

/*
 * Legacy display unit functions - vmwgfx_ldu.c
 */
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);

#endif
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
0 → 100644
View file @
cbc8cc04
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_kms.h"
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.encoder)
#define vmw_connector_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.connector)
struct vmw_legacy_display {
	struct list_head active;

	unsigned num_active;

	struct vmw_framebuffer *fb;
};
/**
* Display unit using the legacy register interface.
*/
struct vmw_legacy_display_unit {
	struct vmw_display_unit base;

	struct list_head active;

	unsigned unit;
};
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
	list_del_init(&ldu->active);

	vmw_display_unit_cleanup(&ldu->base);
	kfree(ldu);
}
/*
* Legacy Display Unit CRTC functions
*/
static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
{
}

static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
{
}

static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
				   u16 *r, u16 *g, u16 *b,
				   uint32_t size)
{
}

static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
}
static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct drm_crtc *crtc;
	int i = 0;

	/* to stop the screen from changing size on resize */
	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
	for (i = 0; i < lds->num_active; i++) {
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* Now set the mode */
	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
	i = 0;
	list_for_each_entry(entry, &lds->active, active) {
		crtc = &entry->base.crtc;

		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		i++;
	}

	return 0;
}
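/*
 * Illustrative sketch, not part of the commit: for a single active unit
 * showing a hypothetical 1024x768 mode at the origin, the loop above
 * boils down to this register sequence:
 *
 *	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
 *	vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
 *	vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
 *	vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
 *	vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
 *	vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 1024);
 *	vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 768);
 *	vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
 */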
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
	if (list_empty(&ldu->active))
		return 0;

	list_del_init(&ldu->active);
	if (--(ld->num_active) == 0) {
		BUG_ON(!ld->fb);
		if (ld->fb->unpin)
			ld->fb->unpin(ld->fb);
		ld->fb = NULL;
	}

	return 0;
}
static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu,
			      struct vmw_framebuffer *vfb)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct list_head *at;

	if (!list_empty(&ldu->active))
		return 0;

	at = &ld->active;
	list_for_each_entry(entry, &ld->active, active) {
		if (entry->unit > ldu->unit)
			break;

		at = &entry->active;
	}

	list_add(&ldu->active, at);
	if (ld->num_active++ == 0) {
		BUG_ON(ld->fb);
		if (vfb->pin)
			vfb->pin(vfb);
		ld->fb = vfb;
	}

	return 0;
}
static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_legacy_display_unit *ldu;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the ldu */
	crtc = set->crtc;
	ldu = vmw_crtc_to_ldu(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	if (set->num_connectors > 1) {
		DRM_ERROR("too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &ldu->base.connector) {
		DRM_ERROR("connector doesn't match %p %p\n",
			  set->connectors[0], &ldu->base.connector);
		return -EINVAL;
	}

	/* ldu only supports one fb active at a time */
	if (dev_priv->ldu_priv->fb && vfb &&
	    dev_priv->ldu_priv->fb != vfb) {
		DRM_ERROR("Multiple framebuffers not supported\n");
		return -EINVAL;
	}

	/* since they always map one to one these are safe */
	connector = &ldu->base.connector;
	encoder = &ldu->base.encoder;

	/* should we turn the crtc off? */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->fb = NULL;

		vmw_ldu_del_active(dev_priv, ldu);

		vmw_ldu_commit_list(dev_priv);

		return 0;
	}

	/* we now know we want to set a mode */
	mode = set->mode;
	fb = set->fb;

	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

	vmw_fb_off(dev_priv);

	crtc->fb = fb;
	encoder->crtc = crtc;
	connector->encoder = encoder;
	crtc->x = set->x;
	crtc->y = set->y;
	crtc->mode = *mode;

	vmw_ldu_add_active(dev_priv, ldu, vfb);

	vmw_ldu_commit_list(dev_priv);

	return 0;
}
static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
	.save = vmw_ldu_crtc_save,
	.restore = vmw_ldu_crtc_restore,
	.cursor_set = vmw_du_crtc_cursor_set,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_ldu_crtc_gamma_set,
	.destroy = vmw_ldu_crtc_destroy,
	.set_config = vmw_ldu_crtc_set_config,
};
/*
* Legacy Display Unit encoder functions
*/
static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}

static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
	.destroy = vmw_ldu_encoder_destroy,
};
/*
* Legacy Display Unit connector functions
*/
static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
{
}

static void vmw_ldu_connector_save(struct drm_connector *connector)
{
}

static void vmw_ldu_connector_restore(struct drm_connector *connector)
{
}

static enum drm_connector_status
	vmw_ldu_connector_detect(struct drm_connector *connector)
{
	/* XXX vmwctrl should control connection status */
	if (vmw_connector_to_ldu(connector)->base.unit == 0)
		return connector_status_connected;
	return connector_status_disconnected;
}
static struct drm_display_mode vmw_ldu_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600",
		   DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		   40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
		   0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
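/*
 * Note on the DRM_MODE() arguments, for readers of the table above: the
 * fields are, in order, name, type, clock (kHz), hdisplay, hsync_start,
 * hsync_end, htotal, hskew, vdisplay, vsync_start, vsync_end, vtotal,
 * vscan and flags. E.g. for the 640x480 entry: a 25.175 MHz pixel clock
 * over an 800x525 total frame gives 25175000 / (800 * 525) ~= 60 Hz.
 */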
static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
					uint32_t max_width, uint32_t max_height)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	int i;

	for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
		if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
		    vmw_ldu_connector_builtin[i].vdisplay > max_height)
			continue;

		mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);

	return 1;
}
static int vmw_ldu_connector_set_property(struct drm_connector *connector,
					  struct drm_property *property,
					  uint64_t val)
{
	return 0;
}

static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
	.dpms = vmw_ldu_connector_dpms,
	.save = vmw_ldu_connector_save,
	.restore = vmw_ldu_connector_restore,
	.detect = vmw_ldu_connector_detect,
	.fill_modes = vmw_ldu_connector_fill_modes,
	.set_property = vmw_ldu_connector_set_property,
	.destroy = vmw_ldu_connector_destroy,
};
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_legacy_display_unit *ldu;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
	if (!ldu)
		return -ENOMEM;

	ldu->unit = unit;
	crtc = &ldu->base.crtc;
	encoder = &ldu->base.encoder;
	connector = &ldu->base.connector;

	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
			   DRM_MODE_CONNECTOR_LVDS);
	/* Initial status */
	if (unit == 0)
		connector->status = connector_status_connected;
	else
		connector->status = connector_status_disconnected;

	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
			 DRM_MODE_ENCODER_LVDS);
	drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	INIT_LIST_HEAD(&ldu->active);

	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);

	drm_connector_attach_property(connector,
				      dev->mode_config.dirty_info_property,
				      1);

	return 0;
}
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
	if (dev_priv->ldu_priv) {
		DRM_INFO("ldu system already on\n");
		return -EINVAL;
	}

	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);

	if (!dev_priv->ldu_priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
	dev_priv->ldu_priv->num_active = 0;
	dev_priv->ldu_priv->fb = NULL;

	drm_mode_create_dirty_info_property(dev_priv->dev);

	vmw_ldu_init(dev_priv, 0);
	vmw_ldu_init(dev_priv, 1);
	vmw_ldu_init(dev_priv, 2);
	vmw_ldu_init(dev_priv, 3);
	vmw_ldu_init(dev_priv, 4);
	vmw_ldu_init(dev_priv, 5);
	vmw_ldu_init(dev_priv, 6);
	vmw_ldu_init(dev_priv, 7);

	return 0;
}
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
	if (!dev_priv->ldu_priv)
		return -ENOSYS;

	BUG_ON(!list_empty(&dev_priv->ldu_priv->active));

	kfree(dev_priv->ldu_priv);

	return 0;
}
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
0 → 100644
View file @
cbc8cc04
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "svga_overlay.h"
#include "svga_escape.h"
#define VMW_MAX_NUM_STREAMS 1
struct vmw_stream {
	struct vmw_dma_buffer *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};
/**
* Overlay control
*/
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	return dev_priv ? dev_priv->overlay_priv : NULL;
}
struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};
static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}
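/*
 * Illustrative sketch, not part of the commit: fill_flush() above builds
 * this little FIFO packet, field by field:
 *
 *	uint32_t             cmd   = SVGA_CMD_ESCAPE;
 *	SVGAFifoCmdEscape    body  = { .nsid = SVGA_ESCAPE_NSID_VMWARE,
 *	                               .size = sizeof(SVGAEscapeVideoFlush) };
 *	SVGAEscapeVideoFlush flush = { .cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH,
 *	                               .streamId = stream_id };
 */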
/**
* Pin or unpin a buffer in vram.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to pin or unpin.
* @pin: Pin buffer in vram if true.
* @interruptible: Use interruptible wait.
*
 * Takes the current master's ttm lock in read mode.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_placement *overlay_placement = &vmw_vram_placement;
	int ret;

	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	if (buf->gmr_bound) {
		vmw_gmr_unbind(dev_priv, buf->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
		spin_unlock(&glob->lru_lock);
		buf->gmr_bound = false;
	}

	if (pin)
		overlay_placement = &vmw_vram_ne_placement;

	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->active_master->lock);

	return ret;
}
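/*
 * Note on the sequence above: the ttm idiom is reserve -> validate against
 * the new placement -> unreserve. Pinning is expressed by validating
 * against the no-evict vram placement (vmw_vram_ne_placement) and
 * unpinning by validating against plain vmw_vram_placement.
 */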
/**
* Send put command to hw.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		struct {
			struct {
				uint32_t cmdType;
				uint32_t streamId;
			} header;
			struct {
				uint32_t registerId;
				uint32_t value;
			} items[SVGA_VIDEO_PITCH_3 + 1];
		} body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	uint32_t offset;
	int i, ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = arg->stream_id;

	for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
		cmds->body.items[i].registerId = i;

	offset = buf->base.offset + arg->offset;

	cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
	cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
	cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
	cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
	cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
	cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
	cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
	cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];

	fill_flush(&cmds->flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}
/**
* Send stop command to hw.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;

	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}
}
/**
* Stop or pause a stream.
*
 * If the stream is paused, the no-evict flag is removed from the buffer
 * but the buffer is left in vram. This allows, for instance, mode_set to
 * evict it should it need to.
*
* The caller must hold the overlay lock.
*
* @stream_id which stream to stop/pause.
* @pause true to pause, false to stop completely.
*/
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* with no buffer attached the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
					     interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}
/**
* Update a stream and send any put or stop fifo commands needed.
*
* The caller must hold the overlay lock.
*
* Returns
* -ENOMEM if buffer doesn't fit in vram.
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and not paused then just send
		 * the put command, no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;

	return 0;
}
/**
* Stop all streams.
*
* Used by the fb code when starting.
*
* Takes the overlay lock.
*/
int
vmw_overlay_stop_all
(
struct
vmw_private
*
dev_priv
)
{
struct
vmw_overlay
*
overlay
=
dev_priv
->
overlay_priv
;
int
i
,
ret
;
if
(
!
overlay
)
return
0
;
mutex_lock
(
&
overlay
->
mutex
);
for
(
i
=
0
;
i
<
VMW_MAX_NUM_STREAMS
;
i
++
)
{
struct
vmw_stream
*
stream
=
&
overlay
->
stream
[
i
];
if
(
!
stream
->
buf
)
continue
;
ret
=
vmw_overlay_stop
(
dev_priv
,
i
,
false
,
false
);
WARN_ON
(
ret
!=
0
);
}
mutex_unlock
(
&
overlay
->
mutex
);
return
0
;
}
/**
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
*
* Takes the overlay lock.
*/
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}
/**
* Pauses all active streams.
*
* Used by the kms code when moving a new scanout buffer to vram.
*
* Takes the overlay lock.
*/
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!overlay)
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!dev_priv->overlay_priv)
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}
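/*
 * Illustrative sketch, not part of the commit: a hypothetical caller would
 * pair vmw_overlay_claim() with vmw_overlay_unref() like this:
 *
 *	uint32_t stream_id;
 *	int ret = vmw_overlay_claim(dev_priv, &stream_id);
 *	if (ret)
 *		return ret;	// -ENOSYS or -ESRCH
 *	// ... use the stream ...
 *	vmw_overlay_unref(dev_priv, stream_id);
 */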
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}
int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
	    (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
		DRM_INFO("hardware doesn't support overlays\n");
		return -ENOSYS;
	}

	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	memset(overlay, 0, sizeof(*overlay));
	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}
int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}
drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
0 → 100644
View file @
cbc8cc04
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* This file contains virtual hardware defines for kernel space.
*/
#ifndef _VMWGFX_REG_H_
#define _VMWGFX_REG_H_
#include <linux/types.h>
#define VMWGFX_INDEX_PORT 0x0
#define VMWGFX_VALUE_PORT 0x1
#define VMWGFX_IRQSTATUS_PORT 0x8
struct svga_guest_mem_descriptor {
	__le32 ppn;
	__le32 num_pages;
};

struct svga_fifo_cmd_fence {
	__le32 fence;
};
#define SVGA_SYNC_GENERIC 1
#define SVGA_SYNC_FIFOFULL 2
#include "svga_types.h"
#include "svga3d_reg.h"
#endif
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
0 → 100644
View file @
cbc8cc04
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
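/*
 * Note on the loop above: idr_pre_get() preallocates idr memory outside
 * the resource_lock, and idr_get_new_above() returns -EAGAIN when that
 * preallocation has been consumed (e.g. by a racing caller), so the two
 * calls are retried as a pair until an id >= 1 is stored in res->id.
 */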
/**
* vmw_resource_activate
*
* @res: Pointer to the newly created resource
* @hw_destroy: Destroy function. NULL if none.
*
* Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
* Context management:
*/
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}
/**
* User-space context management:
*/
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}
/**
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
int
vmw_context_check
(
struct
vmw_private
*
dev_priv
,
struct
ttm_object_file
*
tfile
,
int
id
)
{
struct
vmw_resource
*
res
;
int
ret
=
0
;
read_lock
(
&
dev_priv
->
resource_lock
);
res
=
idr_find
(
&
dev_priv
->
context_idr
,
id
);
if
(
res
&&
res
->
avail
)
{
struct
vmw_user_context
*
ctx
=
container_of
(
res
,
struct
vmw_user_context
,
res
);
if
(
ctx
->
base
.
tfile
!=
tfile
&&
!
ctx
->
base
.
shareable
)
ret
=
-
EPERM
;
}
else
ret
=
-
EINVAL
;
read_unlock
(
&
dev_priv
->
resource_lock
);
return
ret
;
}
/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup(struct vmw_private *dev_priv,
			    struct ttm_object_file *tfile,
			    int sid, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;

	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_surface_free) {
		/* Drop the lookup reference before failing. */
		vmw_resource_unreference(&res);
		return -EINVAL;
	}

	srf = container_of(res, struct vmw_surface, res);
	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
		vmw_resource_unreference(&res);
		return -EPERM;
	}

	*out = srf;
	return 0;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_surface_free) {
		ret = -EINVAL;
		goto out;
	}

	srf = container_of(res, struct vmw_surface, res);
	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
				  TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes),
			     GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		/* copy_from_user() returns the number of bytes not copied,
		 * not an errno, so translate it. */
		ret = -EFAULT;
		goto out_err1;
	}

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	/*
	 * Flag bit (1 << 9) appears to be used by user-space as a
	 * scanout/cursor hint: a 64x64 A8R8G8B8 surface with it set is
	 * treated as a cursor image, so a buffer is allocated to snoop
	 * cursor updates into.
	 */
	if (srf->flags & (1 << 9) &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image)
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		else
			DRM_ERROR("Failed to allocate cursor_image\n");
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	rep->sid = res->id;
	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_surface_free) {
		ret = -EINVAL;
		goto out;
	}

	srf = container_of(res, struct vmw_surface, res);
	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
		DRM_ERROR("Tried to reference a non-shareable surface.\n");
		ret = -EPERM;
		goto out;
	}

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		/* copy_to_user() returns bytes not copied; translate. */
		ret = -EFAULT;
		/**
		 * FIXME: Unreference surface here?
		 */
		goto out;
	}
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->surface_idr, id);
	if (res && res->avail) {
		struct vmw_surface *srf =
		    container_of(res, struct vmw_surface, res);
		struct vmw_user_surface *usrf =
		    container_of(srf, struct vmw_user_surface, srf);

		if (usrf->base.tfile != tfile && !usrf->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}
/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0)) {
		/* vmw_dmabuf_init() has freed vmw_user_bo on failure,
		 * but the read lock must still be dropped. */
		ttm_read_unlock(&vmaster->lock);
		return ret;
	}

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		/* Drop only our extra reference once on failure. */
		ttm_bo_unref(&tmp);
		ttm_read_unlock(&vmaster->lock);
		return ret;
	}

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

	ttm_bo_unref(&tmp);

	ttm_read_unlock(&vmaster->lock);

	return 0;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida,
					 GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t)id;
	return 0;
}
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key,
				  TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream,
			      vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false,
				   VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
		if (vmw_fifo_mmap(filp, vma) == 0)
			return 0;
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);
	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}

static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	DRM_INFO("global init.\n");
	return ttm_mem_global_init(ref->object);
}

static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
	struct ttm_global_reference *global_ref;
	int ret;

	global_ref = &dev_priv->mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &vmw_ttm_mem_global_init;
	global_ref->release = &vmw_ttm_mem_global_release;

	ret = ttm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting.\n");
		return ret;
	}

	dev_priv->bo_global_ref.mem_glob =
	    dev_priv->mem_global_ref.object;
	global_ref = &dev_priv->bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	ret = ttm_global_item_ref(global_ref);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM buffer objects.\n");
		goto out_no_bo;
	}

	return 0;
out_no_bo:
	ttm_global_item_unref(&dev_priv->mem_global_ref);
	return ret;
}

void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
	ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
	ttm_global_item_unref(&dev_priv->mem_global_ref);
}
drivers/staging/Kconfig
...
...
@@ -101,6 +101,8 @@ source "drivers/staging/p9auth/Kconfig"
source "drivers/staging/line6/Kconfig"
source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
...
...
include/drm/Kbuild
...
...
@@ -7,5 +7,6 @@ unifdef-y += r128_drm.h
unifdef-y += radeon_drm.h
unifdef-y += sis_drm.h
unifdef-y += savage_drm.h
unifdef-y += vmwgfx_drm.h
unifdef-y += via_drm.h
unifdef-y += nouveau_drm.h
include/drm/ttm/ttm_object.h
...
...
@@ -77,7 +77,11 @@ enum ttm_object_type {
	ttm_buffer_type,
	ttm_lock_type,
	ttm_driver_type0 = 256,
	ttm_driver_type1,
	ttm_driver_type2,
	ttm_driver_type3,
	ttm_driver_type4,
	ttm_driver_type5
};

struct ttm_object_file;
...
...
include/drm/vmwgfx_drm.h
0 → 100644
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__
#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24
#define DRM_VMW_EXT_NAME_LEN 128
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_FIFO_DEBUG 13
#define DRM_VMW_FENCE_WAIT 14
/*************************************************************************/
/**
* DRM_VMW_GET_PARAM - get device information.
*
* DRM_VMW_PARAM_FIFO_OFFSET:
* Offset to use to map the first page of the FIFO read-only.
* The fifo is mapped using the mmap() system call on the drm device.
*
* DRM_VMW_PARAM_OVERLAY_IOCTL:
* Does the driver support the overlay ioctl.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_FIFO_OFFSET 3
/**
* struct drm_vmw_getparam_arg
*
* @value: Returned value. //Out
* @param: Parameter to query. //In.
*
* Argument to the DRM_VMW_GET_PARAM Ioctl.
*/
struct drm_vmw_getparam_arg {
	uint64_t value;
	uint32_t param;
	uint32_t pad64;
};
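
As a rough user-space illustration only (a sketch, not part of this commit; it assumes libdrm's drmCommandWriteRead() from xf86drm.h, an open file descriptor on the vmwgfx drm node, and the hypothetical helper name vmw_example_has_3d):

#include <stdint.h>
#include <xf86drm.h>

/* Query whether the host supports 3D commands via DRM_VMW_GET_PARAM. */
static int vmw_example_has_3d(int fd)
{
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };

	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) != 0)
		return -1;	/* ioctl failed */

	return arg.value != 0;
}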
/*************************************************************************/
/**
* DRM_VMW_EXTENSION - Query device extensions.
*/
/**
* struct drm_vmw_extension_rep
*
* @exists: The queried extension exists.
* @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
* @driver_sarea_offset: Offset to any space in the DRI SAREA
* used by the extension.
* @major: Major version number of the extension.
* @minor: Minor version number of the extension.
* @pl: Patch level version number of the extension.
*
* Output argument to the DRM_VMW_EXTENSION Ioctl.
*/
struct drm_vmw_extension_rep {
	int32_t exists;
	uint32_t driver_ioctl_offset;
	uint32_t driver_sarea_offset;
	uint32_t major;
	uint32_t minor;
	uint32_t pl;
	uint32_t pad64;
};
/**
* union drm_vmw_extension_arg
*
* @extension - Ascii name of the extension to be queried. //In
* @rep - Reply as defined above. //Out
*
* Argument to the DRM_VMW_EXTENSION Ioctl.
*/
union drm_vmw_extension_arg {
	char extension[DRM_VMW_EXT_NAME_LEN];
	struct drm_vmw_extension_rep rep;
};
/*************************************************************************/
/**
* DRM_VMW_CREATE_CONTEXT - Create a host context.
*
* Allocates a device unique context id, and queues a create context command
* for the host. Does not wait for host completion.
*/
/**
* struct drm_vmw_context_arg
*
* @cid: Device unique context ID.
*
* Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
* Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
*/
struct drm_vmw_context_arg {
	int32_t cid;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
*
* Frees a global context id, and queues a destroy host command for the host.
* Does not wait for host completion. The context ID can be used directly
* in the command stream and shows up as the same context ID on the host.
*/
/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
*
* Allocates a device unique surface id, and queues a create surface command
* for the host. Does not wait for host completion. The surface ID can be
* used directly in the command stream and shows up as the same surface
* ID on the host.
*/
/**
 * struct drm_vmw_surface_create_req
*
* @flags: Surface flags as understood by the host.
* @format: Surface format as understood by the host.
* @mip_levels: Number of mip levels for each face.
* An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
* cast to an uint64_t for 32-64 bit compatibility.
* The size of the array should equal the total number of mipmap levels.
* @shareable: Boolean whether other clients (as identified by file descriptors)
* may reference this surface.
*
* Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
* Output data from the DRM_VMW_REF_SURFACE Ioctl.
*/
struct drm_vmw_surface_create_req {
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	uint64_t size_addr;
	int32_t shareable;
	uint32_t pad64;
};
/**
 * struct drm_vmw_surface_arg
*
* @sid: Surface id of created surface or surface to destroy or reference.
*
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
* Input argument to the DRM_VMW_REF_SURFACE Ioctl.
*/
struct drm_vmw_surface_arg {
	int32_t sid;
	uint32_t pad64;
};
/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
*/
struct drm_vmw_size {
	uint32_t width;
	uint32_t height;
	uint32_t depth;
	uint32_t pad64;
};
/**
* union drm_vmw_surface_create_arg
*
* @rep: Output data as described above.
* @req: Input data as described above.
*
* Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
*/
union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
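
A sketch of how a client might drive DRM_VMW_CREATE_SURFACE, under the same libdrm assumptions as the earlier GET_PARAM sketch (drmCommandWriteRead() and an open vmwgfx fd; the helper name and the single-face, single-mip layout are illustrative):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Create a one-face, one-mip-level surface; "format" is an
 * SVGA3dSurfaceFormat value from the device headers (e.g.
 * SVGA3D_A8R8G8B8). Returns the surface id, or -1 on failure. */
static int32_t vmw_example_create_surface(int fd, uint32_t format,
					  uint32_t width, uint32_t height)
{
	struct drm_vmw_size size = { .width = width, .height = height,
				     .depth = 1 };
	union drm_vmw_surface_create_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.req.format = format;
	arg.req.mip_levels[0] = 1;	/* unused faces keep 0 */
	arg.req.size_addr = (uint64_t)(unsigned long)&size;

	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg,
				sizeof(arg)) != 0)
		return -1;

	return arg.rep.sid;	/* usable directly in the command stream */
}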
/*************************************************************************/
/**
* DRM_VMW_REF_SURFACE - Reference a host surface.
*
 * Puts a reference on a host surface with a given sid, as previously
* returned by the DRM_VMW_CREATE_SURFACE ioctl.
* A reference will make sure the surface isn't destroyed while we hold
* it and will allow the calling client to use the surface ID in the command
* stream.
*
* On successful return, the Ioctl returns the surface information given
* in the DRM_VMW_CREATE_SURFACE ioctl.
*/
/**
* union drm_vmw_surface_reference_arg
*
* @rep: Output data as described above.
* @req: Input data as described above.
*
* Argument to the DRM_VMW_REF_SURFACE Ioctl.
*/
union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};
/*************************************************************************/
/**
* DRM_VMW_UNREF_SURFACE - Unreference a host surface.
*
* Clear a reference previously put on a host surface.
* When all references are gone, including the one implicitly placed
* on creation,
* a destroy surface command will be queued for the host.
* Does not wait for completion.
*/
/*************************************************************************/
/**
* DRM_VMW_EXECBUF
*
* Submit a command buffer for execution on the host, and return a
* fence sequence that when signaled, indicates that the command buffer has
* executed.
*/
/**
* struct drm_vmw_execbuf_arg
*
* @commands: User-space address of a command buffer cast to an uint64_t.
 * @command_size: Size in bytes of the command buffer.
* @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
* uint64_t.
*
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
struct drm_vmw_execbuf_arg {
	uint64_t commands;
	uint32_t command_size;
	uint32_t pad64;
	uint64_t fence_rep;
};
/**
* struct drm_vmw_fence_rep
*
* @fence_seq: Fence sequence associated with a command submission.
 * @error: This member should be set to -EFAULT by user-space before
 * submission. The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
*
* Input / Output data to the DRM_VMW_EXECBUF Ioctl.
*/
struct drm_vmw_fence_rep {
	uint64_t fence_seq;
	int32_t error;
	uint32_t pad64;
};
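
A sketch of a submission wrapper following the error convention documented above (assumptions as before: libdrm's drmCommandWrite(), an open vmwgfx fd, and an illustrative helper name):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Submit a raw command buffer; on success, *seq_out holds the fence
 * sequence to wait for, or 0 if the caller must instead read the last
 * fence id from the FIFO fence register. */
static int vmw_example_execbuf(int fd, void *cmds, uint32_t cmd_size,
			       uint64_t *seq_out)
{
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_execbuf_arg arg;
	int ret;

	memset(&fence_rep, 0, sizeof(fence_rep));
	fence_rep.error = -EFAULT;	/* pre-set as described above */

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uint64_t)(unsigned long)cmds;
	arg.command_size = cmd_size;
	arg.fence_rep = (uint64_t)(unsigned long)&fence_rep;

	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	if (ret != 0)
		return ret;

	/* Still -EFAULT: fence write-back failed but the host is synced. */
	*seq_out = (fence_rep.error != -EFAULT) ? fence_rep.fence_seq : 0;
	return 0;
}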
/*************************************************************************/
/**
* DRM_VMW_ALLOC_DMABUF
*
* Allocate a DMA buffer that is visible also to the host.
* NOTE: The buffer is
* identified by a handle and an offset, which are private to the guest, but
 * usable in the command stream. The guest kernel may translate these
* and patch up the command stream accordingly. In the future, the offset may
* be zero at all times, or it may disappear from the interface before it is
* fixed.
*
* The DMA buffer may stay user-space mapped in the guest at all times,
* and is thus suitable for sub-allocation.
*
* DMA buffers are mapped using the mmap() syscall on the drm device.
*/
/**
* struct drm_vmw_alloc_dmabuf_req
*
* @size: Required minimum size of the buffer.
*
* Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
*/
struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;
	uint32_t pad64;
};
/**
* struct drm_vmw_dmabuf_rep
*
* @map_handle: Offset to use in the mmap() call used to map the buffer.
* @handle: Handle unique to this buffer. Used for unreferencing.
* @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
* @cur_gmr_offset: Offset to use in the command stream when this buffer is
* referenced. See note above.
*
* Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
*/
struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;
	uint32_t handle;
	uint32_t cur_gmr_id;
	uint32_t cur_gmr_offset;
	uint32_t pad64;
};
/**
* union drm_vmw_dmabuf_arg
*
* @req: Input data as described above.
* @rep: Output data as described above.
*
* Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
*/
union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};
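
A sketch of the allocate-and-map flow described above (same libdrm assumptions; the helper name is illustrative). The buffer is mapped through the drm device node, using the returned map_handle as the mmap offset:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

/* Allocate a host-visible DMA buffer and map it read/write.
 * On success *handle_out receives the handle needed later for
 * DRM_VMW_UNREF_DMABUF. Returns NULL on failure. */
static void *vmw_example_alloc_map(int fd, uint32_t size,
				   uint32_t *handle_out)
{
	union drm_vmw_alloc_dmabuf_arg arg;
	void *map;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = size;

	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg,
				sizeof(arg)) != 0)
		return NULL;

	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, (off_t)arg.rep.map_handle);
	if (map == MAP_FAILED)
		return NULL;

	*handle_out = arg.rep.handle;
	return map;
}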
/*************************************************************************/
/**
* DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
*
*/
/**
* struct drm_vmw_unref_dmabuf_arg
*
* @handle: Handle indicating what buffer to free. Obtained from the
* DRM_VMW_ALLOC_DMABUF Ioctl.
*
* Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
*/
struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;
	uint32_t pad64;
};
/*************************************************************************/
/**
* DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
*
* This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
*/
/**
* struct drm_vmw_fifo_debug_arg
*
* @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
* @debug_buffer_size: Size in bytes of debug buffer //In
* @used_size: Number of bytes copied to the buffer // Out
* @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
*
* Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
*/
struct drm_vmw_fifo_debug_arg {
	uint64_t debug_buffer;
	uint32_t debug_buffer_size;
	uint32_t used_size;
	int32_t did_not_fit;
	uint32_t pad64;
};
struct drm_vmw_fence_wait_arg {
	uint64_t sequence;
	uint64_t kernel_cookie;
	int32_t cookie_valid;
	int32_t pad64;
};
/*************************************************************************/
/**
* DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
*
* This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they read it back on each call to this ioctl, and
* at any point between this call being made and a following call that
* either changes the buffer or disables the stream.
*/
/**
* struct drm_vmw_rect
*
* Defines a rectangle. Used in the overlay ioctl to define
* source and destination rectangle.
*/
struct drm_vmw_rect {
	int32_t x;
	int32_t y;
	uint32_t w;
	uint32_t h;
};
/**
* struct drm_vmw_control_stream_arg
*
 * @stream_id: Stream to control
* @enabled: If false all following arguments are ignored.
* @handle: Handle to buffer for getting data from.
* @format: Format of the overlay as understood by the host.
* @width: Width of the overlay.
* @height: Height of the overlay.
* @size: Size of the overlay in bytes.
* @pitch: Array of pitches, the two last are only used for YUV12 formats.
* @offset: Offset from start of dma buffer to overlay.
* @src: Source rect, must be within the defined area above.
* @dst: Destination rect, x and y may be negative.
*
* Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
*/
struct drm_vmw_control_stream_arg {
	uint32_t stream_id;
	uint32_t enabled;

	uint32_t flags;
	uint32_t color_key;

	uint32_t handle;
	uint32_t offset;
	int32_t format;
	uint32_t size;
	uint32_t width;
	uint32_t height;
	uint32_t pitch[3];

	uint32_t pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};
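
Tying the pieces together, a sketch of enabling an overlay on a previously claimed stream id, showing a whole buffer 1:1 at the top-left of the screen (same libdrm assumptions; format, size and pitch values are host-specific and passed through untouched here):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

static int vmw_example_show_overlay(int fd, uint32_t stream_id,
				    uint32_t handle, int32_t format,
				    uint32_t width, uint32_t height,
				    uint32_t size, uint32_t pitch0)
{
	struct drm_vmw_control_stream_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.stream_id = stream_id;
	arg.enabled = 1;
	arg.handle = handle;		/* from DRM_VMW_ALLOC_DMABUF */
	arg.format = format;
	arg.size = size;
	arg.width = width;
	arg.height = height;
	arg.pitch[0] = pitch0;
	arg.src.w = arg.dst.w = width;	/* x and y stay 0 */
	arg.src.h = arg.dst.h = height;

	return drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
}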
/*************************************************************************/
/**
* DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
*
*/
#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
/**
* struct drm_vmw_cursor_bypass_arg
*
* @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
* @xpos: X position of cursor.
* @ypos: Y position of cursor.
* @xhot: X hotspot.
* @yhot: Y hotspot.
*
* Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
*/
struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;
	uint32_t crtc_id;
	int32_t xpos;
	int32_t ypos;
	int32_t xhot;
	int32_t yhot;
};
/*************************************************************************/
/**
* DRM_VMW_CLAIM_STREAM - Claim a single stream.
*/
/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
*/
struct drm_vmw_stream_arg {
	uint32_t stream_id;
	uint32_t pad64;
};
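
For completeness, a minimal sketch of claiming a stream before using DRM_VMW_CONTROL_STREAM (same libdrm assumptions, illustrative helper name):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

static int vmw_example_claim_stream(int fd, uint32_t *stream_id_out)
{
	struct drm_vmw_stream_arg arg;

	memset(&arg, 0, sizeof(arg));
	if (drmCommandWriteRead(fd, DRM_VMW_CLAIM_STREAM, &arg,
				sizeof(arg)) != 0)
		return -1;

	*stream_id_out = arg.stream_id;
	return 0;
}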
/*************************************************************************/
/**
* DRM_VMW_UNREF_STREAM - Unclaim a stream.
*
* Return a single stream that was claimed by this process. Also makes
* sure that the stream has been stopped.
*/
#endif