Commit 523375c9 authored by Zack Rusin

drm/vmwgfx: Port vmwgfx to arm64

This change fixes all of the arm64 issues we've had in the driver.
ARM support is provided in SVGA version 3, support for which we added
in previous changes. SVGA version 3 currently lacks many of the
advanced features (in particular 3D support is lacking) but
that will change in time.
Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210505035740.286923-7-zackr@vmware.com
parent 2cd80dbd
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && X86 && MMU
depends on DRM && PCI && MMU
depends on X86 || ARM64
select DRM_TTM
select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
......
......@@ -37,6 +37,7 @@
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>
#include "ttm_object.h"
#include "vmwgfx_binding.h"
......@@ -781,7 +782,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
char host_log[100] = {0};
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
dev_priv->vmw_chipset = pci_id;
......@@ -1050,10 +1050,9 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
DRM_INFO("SM4 support available.\n");
DRM_INFO("Running without reservation semaphore\n");
snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
VMWGFX_DRIVER_PATCHLEVEL);
vmw_host_log(host_log);
vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
if (dev_priv->enable_fb) {
vmw_fifo_resource_inc(dev_priv);
......
......@@ -1498,7 +1498,7 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
/* Host messaging -vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
......
......@@ -33,7 +33,8 @@
#include <asm/hypervisor.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_msg.h"
#include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h"
#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV 0x0002
......@@ -473,30 +474,40 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
}
/**
* vmw_host_log: Sends a log message to the host
* vmw_host_printf: Sends a log message to the host
*
* @log: NULL terminated string
* @fmt: Regular printf format string and arguments
*
* Returns: 0 on success
*/
int vmw_host_log(const char *log)
__printf(1, 2)
int vmw_host_printf(const char *fmt, ...)
{
va_list ap;
struct rpc_channel channel;
char *msg;
char *log;
int ret = 0;
if (!vmw_msg_enabled)
return -ENODEV;
if (!log)
if (!fmt)
return ret;
va_start(ap, fmt);
log = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
if (!log) {
DRM_ERROR("Cannot allocate memory for the log message.\n");
return -ENOMEM;
}
msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
DRM_ERROR("Cannot allocate memory for host log message.\n");
kfree(log);
return -ENOMEM;
}
......@@ -508,6 +519,7 @@ int vmw_host_log(const char *log)
vmw_close_channel(&channel);
kfree(msg);
kfree(log);
return 0;
......@@ -515,6 +527,7 @@ int vmw_host_log(const char *log)
vmw_close_channel(&channel);
out_open:
kfree(msg);
kfree(log);
DRM_ERROR("Failed to send host log message.\n");
return -EINVAL;
......@@ -537,7 +550,7 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_msg_arg *arg =
(struct drm_vmw_msg_arg *) data;
(struct drm_vmw_msg_arg *)data;
struct rpc_channel channel;
char *msg;
int length;
......@@ -577,7 +590,7 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
}
if (reply && reply_len > 0) {
if (copy_to_user((void __user *)((unsigned long)arg->receive),
reply, reply_len)) {
reply, reply_len)) {
DRM_ERROR("Failed to copy message to userspace.\n");
kfree(reply);
goto out_msg;
......
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2021 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _VMWGFX_MSG_ARM64_H
#define _VMWGFX_MSG_ARM64_H
#if defined(__aarch64__)
#define VMWARE_HYPERVISOR_PORT 0x5658
#define VMWARE_HYPERVISOR_PORT_HB 0x5659
#define VMWARE_HYPERVISOR_HB BIT(0)
#define VMWARE_HYPERVISOR_OUT BIT(1)
#define X86_IO_MAGIC 0x86
#define X86_IO_W7_SIZE_SHIFT 0
#define X86_IO_W7_SIZE_MASK (0x3 << X86_IO_W7_SIZE_SHIFT)
#define X86_IO_W7_DIR (1 << 2)
#define X86_IO_W7_WITH (1 << 3)
#define X86_IO_W7_STR (1 << 4)
#define X86_IO_W7_DF (1 << 5)
#define X86_IO_W7_IMM_SHIFT 5
#define X86_IO_W7_IMM_MASK (0xff << X86_IO_W7_IMM_SHIFT)
/**
 * vmw_port - Low-bandwidth hypercall to the VMware host (arm64 variant).
 *
 * Mirrors the x86 backdoor-port protocol: the x86 register file is mapped
 * onto x0-x5 (eax..edi) and x7 carries an X86_IO_* descriptor of the
 * emulated I/O instruction (here: OUT-direction, dword size, "with" form,
 * on VMWARE_HYPERVISOR_PORT).
 *
 * NOTE(review): the "mrs xzr, mdccsr_el0" read is assumed to be trapped by
 * the VMware hypervisor, which then performs the backdoor call using the
 * register mapping above — confirm against the VMware arm64 backdoor spec.
 *
 * @cmd:    [IN] backdoor command number (goes into emulated %ecx)
 * @in_ebx: [IN] command parameter (emulated %ebx)
 * @in_si:  [IN] command parameter (emulated %esi)
 * @in_di:  [IN] command parameter (emulated %edi)
 * @flags:  [IN] channel/protocol flags, OR'ed with the backdoor port number
 * @magic:  [IN] VMware backdoor magic (emulated %eax)
 * @eax:    [OUT] emulated %eax result
 * @ebx:    [OUT] emulated %ebx result
 * @ecx:    [OUT] emulated %ecx result
 * @edx:    [OUT] emulated %edx result
 * @si:     [OUT] emulated %esi result
 * @di:     [OUT] emulated %edi result
 */
static inline void vmw_port(unsigned long cmd, unsigned long in_ebx,
unsigned long in_si, unsigned long in_di,
unsigned long flags, unsigned long magic,
unsigned long *eax, unsigned long *ebx,
unsigned long *ecx, unsigned long *edx,
unsigned long *si, unsigned long *di)
{
/* Pin each emulated x86 register to a fixed arm64 GPR for the trap. */
register u64 x0 asm("x0") = magic;
register u64 x1 asm("x1") = in_ebx;
register u64 x2 asm("x2") = cmd;
register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT;
register u64 x4 asm("x4") = in_si;
register u64 x5 asm("x5") = in_di;
/* x7 describes the emulated I/O insn: OUT, dword, "with" prefix. */
register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
X86_IO_W7_WITH |
X86_IO_W7_DIR |
(2 << X86_IO_W7_SIZE_SHIFT);
/*
 * x0-x5 are read-write ("+r") so the hypervisor's return values are
 * observed; the result of the mrs itself is discarded (xzr).
 */
asm volatile("mrs xzr, mdccsr_el0 \n\t"
: "+r"(x0), "+r"(x1), "+r"(x2),
"+r"(x3), "+r"(x4), "+r"(x5)
: "r"(x7)
:);
*eax = x0;
*ebx = x1;
*ecx = x2;
*edx = x3;
*si = x4;
*di = x5;
}
/**
 * vmw_port_hb - High-bandwidth hypercall to the VMware host (arm64 variant).
 *
 * Same trap mechanism as vmw_port(), but emulates the x86 string-I/O
 * (rep ins/outs style) high-bandwidth backdoor: the buffer address travels
 * in x6 (emulated %ebp) and @w7dir selects transfer direction via
 * X86_IO_W7_DIR (0 = guest-to-host/OUT, set = host-to-guest/IN — see the
 * VMW_PORT_HB_OUT/VMW_PORT_HB_IN wrappers below).
 *
 * @cmd:    [IN] backdoor command number (emulated %ebx here)
 * @in_ecx: [IN] transfer size/parameter (emulated %ecx)
 * @in_si:  [IN] source address parameter (emulated %esi)
 * @in_di:  [IN] destination address parameter (emulated %edi)
 * @flags:  [IN] channel flags, OR'ed with the high-bandwidth port number
 * @magic:  [IN] VMware backdoor magic (emulated %eax)
 * @bp:     [IN] emulated %ebp (buffer pointer for the HB transfer)
 * @w7dir:  [IN] 0 or X86_IO_W7_DIR, choosing OUT vs IN direction
 * @eax:    [OUT] emulated %eax result
 * @ebx:    [OUT] emulated %ebx result
 * @ecx:    [OUT] emulated %ecx result
 * @edx:    [OUT] emulated %edx result
 * @si:     [OUT] emulated %esi result
 * @di:     [OUT] emulated %edi result
 */
static inline void vmw_port_hb(unsigned long cmd, unsigned long in_ecx,
unsigned long in_si, unsigned long in_di,
unsigned long flags, unsigned long magic,
unsigned long bp, u32 w7dir,
unsigned long *eax, unsigned long *ebx,
unsigned long *ecx, unsigned long *edx,
unsigned long *si, unsigned long *di)
{
/* Fixed register mapping; note cmd lands in x1 (%ebx) for HB calls. */
register u64 x0 asm("x0") = magic;
register u64 x1 asm("x1") = cmd;
register u64 x2 asm("x2") = in_ecx;
register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT_HB;
register u64 x4 asm("x4") = in_si;
register u64 x5 asm("x5") = in_di;
register u64 x6 asm("x6") = bp;
/* String ("STR") variant of the emulated I/O; direction from caller. */
register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
X86_IO_W7_STR |
X86_IO_W7_WITH |
w7dir;
/*
 * x0-x5 are read-write so the hypervisor's return values come back;
 * x6/x7 are inputs only. The mrs result itself is discarded (xzr).
 */
asm volatile("mrs xzr, mdccsr_el0 \n\t"
: "+r"(x0), "+r"(x1), "+r"(x2),
"+r"(x3), "+r"(x4), "+r"(x5)
: "r"(x6), "r"(x7)
:);
*eax = x0;
*ebx = x1;
*ecx = x2;
*edx = x3;
*si = x4;
*di = x5;
}
/*
 * Call-site wrappers matching the x86 VMW_PORT* macro signatures: output
 * arguments are named lvalues at the call site and are passed by address
 * to the inline helpers above, so shared callers compile unchanged on
 * both architectures.
 */
#define VMW_PORT(cmd, in_ebx, in_si, in_di, flags, magic, eax, ebx, ecx, edx, \
si, di) \
vmw_port(cmd, in_ebx, in_si, in_di, flags, magic, &eax, &ebx, &ecx, \
&edx, &si, &di)
/* High-bandwidth guest-to-host transfer (w7dir = 0 -> OUT direction). */
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
ecx, edx, si, di) \
vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
0, &eax, &ebx, &ecx, &edx, &si, &di)
/* High-bandwidth host-to-guest transfer (w7dir = X86_IO_W7_DIR -> IN). */
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
ecx, edx, si, di) \
vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
X86_IO_W7_DIR, &eax, &ebx, &ecx, &edx, &si, &di)
#endif
#endif /* _VMWGFX_MSG_ARM64_H */
/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
/**************************************************************************
*
* Copyright 2016 VMware, Inc., Palo Alto, CA., USA
* Copyright 2016-2021 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
......@@ -29,8 +29,11 @@
* Author:
* Sinclair Yeh <syeh@vmware.com>
*/
#ifndef _VMWGFX_MSG_H
#define _VMWGFX_MSG_H
#ifndef _VMWGFX_MSG_X86_H
#define _VMWGFX_MSG_X86_H
#if defined(__i386__) || defined(__x86_64__)
#include <asm/vmware.h>
......@@ -55,23 +58,23 @@
* @di: [OUT]
*/
#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
flags, magic, \
eax, ebx, ecx, edx, si, di) \
flags, magic, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile (VMWARE_HYPERCALL : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(in_ebx), \
"c"(cmd), \
"d"(flags), \
"S"(in_si), \
"D"(in_di) : \
"memory"); \
asm volatile (VMWARE_HYPERCALL : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(in_ebx), \
"c"(cmd), \
"d"(flags), \
"S"(in_si), \
"D"(in_di) : \
"memory"); \
})
......@@ -99,55 +102,55 @@
#ifdef __x86_64__
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_OUT \
"pop %%rbp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
"memory", "cc"); \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_OUT \
"pop %%rbp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
"memory", "cc"); \
})
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_IN \
"pop %%rbp" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
"memory", "cc"); \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_IN \
"pop %%rbp" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
"memory", "cc"); \
})
#else
#elif defined(__i386__)
/*
* In the 32-bit version of this macro, we store bp in a memory location
......@@ -158,57 +161,59 @@
* just pushed it.
*/
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
VMWARE_HYPERCALL_HB_OUT \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
"memory", "cc"); \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
VMWARE_HYPERCALL_HB_OUT \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
"memory", "cc"); \
})
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
VMWARE_HYPERCALL_HB_IN \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
"memory", "cc"); \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
VMWARE_HYPERCALL_HB_IN \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
"=d"(edx), \
"=S"(si), \
"=D"(di) : \
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
"d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
"memory", "cc"); \
})
#endif /* #if __x86_64__ */
#endif /* defined(__i386__) */
#endif /* defined(__i386__) || defined(__x86_64__) */
#endif
#endif /* _VMWGFX_MSG_X86_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment