Commit 040bf2a9 authored by Dave Airlie

Merge tag 'drm-misc-next-fixes-2021-12-23' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

Short summary of fixes pull:

 * bridge/lvds: Fix DT bindings
 * vmwgfx: Fix several issues with the recent conversion to GEM
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YcRAH8lYbsoSCeY9@linux-uq9g.fritz.box
parents 4817c37d 5da8b49d
@@ -94,31 +94,32 @@ properties:
   power-supply: true
 
-if:
-  not:
-    properties:
-      compatible:
-        contains:
-          const: lvds-decoder
-then:
-  properties:
-    ports:
-      properties:
-        port@0:
-          properties:
-            endpoint:
-              properties:
-                data-mapping: false
-
-if:
-  not:
-    properties:
-      compatible:
-        contains:
-          const: lvds-encoder
-then:
-  properties:
-    pclk-sample: false
+allOf:
+  - if:
+      not:
+        properties:
+          compatible:
+            contains:
+              const: lvds-decoder
+    then:
+      properties:
+        ports:
+          properties:
+            port@0:
+              properties:
+                endpoint:
+                  properties:
+                    data-mapping: false
+
+  - if:
+      not:
+        properties:
+          compatible:
+            contains:
+              const: lvds-encoder
+    then:
+      properties:
+        pclk-sample: false
 
 required:
   - compatible
......
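The restructuring above is needed because a YAML mapping cannot hold two "if:" keys at the same level; the second conditional collides with the first as a duplicate key, so only one constraint can take effect. Moving each if/then pair into its own "allOf:" entry keeps both. A minimal sketch of the pattern, using hypothetical compatibles and property names rather than the actual lvds binding:

# Sketch only: generic json-schema fragment with placeholder
# "vendor,chip-a" / "vendor,chip-b" compatibles and properties.
allOf:
  - if:
      properties:
        compatible:
          contains:
            const: vendor,chip-a
    then:
      properties:
        chip-a-only-prop: false   # constraint applied only to chip-a
  - if:
      properties:
        compatible:
          contains:
            const: vendor,chip-b
    then:
      properties:
        chip-b-only-prop: false   # constraint applied only to chip-b

Each list entry is its own mapping, so the two conditionals no longer share a key namespace and both are evaluated.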
@@ -13,6 +13,5 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d
 	    vmwgfx_gem.o
 
 vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
-vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
@@ -568,10 +568,12 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
 	struct vmw_buffer_object *vmw_bo;
 	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
 
-	if (!(flags & drm_vmw_synccpu_allow_cs)) {
-		atomic_dec(&vmw_bo->cpu_writers);
+	if (!ret) {
+		if (!(flags & drm_vmw_synccpu_allow_cs)) {
+			atomic_dec(&vmw_bo->cpu_writers);
+		}
+		ttm_bo_put(&vmw_bo->base);
 	}
-	ttm_bo_put(&vmw_bo->base);
 
 	return ret;
 }
......
@@ -701,23 +701,15 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
 static int vmw_vram_manager_init(struct vmw_private *dev_priv)
 {
 	int ret;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	ret = vmw_thp_init(dev_priv);
-#else
 	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
 				 dev_priv->vram_size >> PAGE_SHIFT);
-#endif
 	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
 	return ret;
 }
 
 static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	vmw_thp_fini(dev_priv);
-#else
 	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
-#endif
 }
 
 static int vmw_setup_pci_resources(struct vmw_private *dev,
......
@@ -59,11 +59,8 @@
 #define VMWGFX_DRIVER_MINOR 20
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
 #define VMWGFX_PCI_ID_SVGA2 0x0405
 #define VMWGFX_PCI_ID_SVGA3 0x0406
@@ -1564,11 +1561,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
 
-/* Transparent hugepage support - vmwgfx_thp.c */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int vmw_thp_init(struct vmw_private *dev_priv);
-void vmw_thp_fini(struct vmw_private *dev_priv);
-#endif
 
 /**
  * VMW_DEBUG_KMS - Debug output for kernel mode-setting
......
@@ -227,7 +227,7 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f
 		break;
 	}
 
-	seq_printf(m, "\t\t0x%08x: %12ld bytes %s, type = %s",
+	seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
 		   id, bo->base.base.size, placement, type);
 	seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
 		   bo->base.priority,
......
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2012-2021 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -29,12 +29,6 @@
 
 #include "vmwgfx_drv.h"
 
-/*
- * If we set up the screen target otable, screen objects stop working.
- */
-
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
-
 #ifdef CONFIG_64BIT
 #define VMW_PPN_SIZE 8
 #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0
@@ -75,7 +69,7 @@ static const struct vmw_otable pre_dx_tables[] = {
 	{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
 	{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
 	{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
-	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+	 NULL, true}
 };
 
 static const struct vmw_otable dx_tables[] = {
@@ -84,7 +78,7 @@ static const struct vmw_otable dx_tables[] = {
 	{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
 	{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
 	{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
-	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+	 NULL, true},
 	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
 };
......
@@ -1872,8 +1872,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 	int i, ret;
 
-	/* Do nothing if Screen Target support is turned off */
-	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
+	/* Do nothing if there's no support for MOBs */
+	if (!dev_priv->has_mob)
 		return -ENOSYS;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
......
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Huge page-table-entry support for IO memory.
- *
- * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
- */
-
-#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_range_manager.h>
-
-/**
- * struct vmw_thp_manager - Range manager implementing huge page alignment
- *
- * @manager: TTM resource manager.
- * @mm: The underlying range manager. Protected by @lock.
- * @lock: Manager lock.
- */
-struct vmw_thp_manager {
-	struct ttm_resource_manager manager;
-	struct drm_mm mm;
-	spinlock_t lock;
-};
-
-static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
-{
-	return container_of(man, struct vmw_thp_manager, manager);
-}
-
-static const struct ttm_resource_manager_func vmw_thp_func;
-
-static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
-				  struct drm_mm *mm, struct drm_mm_node *node,
-				  unsigned long align_pages,
-				  const struct ttm_place *place,
-				  struct ttm_resource *mem,
-				  unsigned long lpfn,
-				  enum drm_mm_insert_mode mode)
-{
-	if (align_pages >= bo->page_alignment &&
-	    (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
-		return drm_mm_insert_node_in_range(mm, node,
-						   mem->num_pages,
-						   align_pages, 0,
-						   place->fpfn, lpfn, mode);
-	}
-
-	return -ENOSPC;
-}
-
-static int vmw_thp_get_node(struct ttm_resource_manager *man,
-			    struct ttm_buffer_object *bo,
-			    const struct ttm_place *place,
-			    struct ttm_resource **res)
-{
-	struct vmw_thp_manager *rman = to_thp_manager(man);
-	struct drm_mm *mm = &rman->mm;
-	struct ttm_range_mgr_node *node;
-	unsigned long align_pages;
-	unsigned long lpfn;
-	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
-	int ret;
-
-	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-
-	ttm_resource_init(bo, place, &node->base);
-
-	lpfn = place->lpfn;
-	if (!lpfn)
-		lpfn = man->size;
-
-	mode = DRM_MM_INSERT_BEST;
-	if (place->flags & TTM_PL_FLAG_TOPDOWN)
-		mode = DRM_MM_INSERT_HIGH;
-
-	spin_lock(&rman->lock);
-	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
-		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
-		if (node->base.num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-						     align_pages, place,
-						     &node->base, lpfn, mode);
-			if (!ret)
-				goto found_unlock;
-		}
-	}
-
-	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
-	if (node->base.num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-					     align_pages, place, &node->base,
-					     lpfn, mode);
-		if (!ret)
-			goto found_unlock;
-	}
-
-	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  node->base.num_pages,
-					  bo->page_alignment, 0,
-					  place->fpfn, lpfn, mode);
-found_unlock:
-	spin_unlock(&rman->lock);
-
-	if (unlikely(ret)) {
-		kfree(node);
-	} else {
-		node->base.start = node->mm_nodes[0].start;
-		*res = &node->base;
-	}
-
-	return ret;
-}
-
-static void vmw_thp_put_node(struct ttm_resource_manager *man,
-			     struct ttm_resource *res)
-{
-	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
-	struct vmw_thp_manager *rman = to_thp_manager(man);
-
-	spin_lock(&rman->lock);
-	drm_mm_remove_node(&node->mm_nodes[0]);
-	spin_unlock(&rman->lock);
-
-	kfree(node);
-}
-
-int vmw_thp_init(struct vmw_private *dev_priv)
-{
-	struct vmw_thp_manager *rman;
-
-	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
-	if (!rman)
-		return -ENOMEM;
-
-	ttm_resource_manager_init(&rman->manager,
-				  dev_priv->vram_size >> PAGE_SHIFT);
-
-	rman->manager.func = &vmw_thp_func;
-	drm_mm_init(&rman->mm, 0, rman->manager.size);
-	spin_lock_init(&rman->lock);
-
-	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
-	ttm_resource_manager_set_used(&rman->manager, true);
-	return 0;
-}
-
-void vmw_thp_fini(struct vmw_private *dev_priv)
-{
-	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
-	struct vmw_thp_manager *rman = to_thp_manager(man);
-	struct drm_mm *mm = &rman->mm;
-	int ret;
-
-	ttm_resource_manager_set_used(man, false);
-
-	ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
-	if (ret)
-		return;
-	spin_lock(&rman->lock);
-	drm_mm_clean(mm);
-	drm_mm_takedown(mm);
-	spin_unlock(&rman->lock);
-	ttm_resource_manager_cleanup(man);
-	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
-	kfree(rman);
-}
-
-static void vmw_thp_debug(struct ttm_resource_manager *man,
-			  struct drm_printer *printer)
-{
-	struct vmw_thp_manager *rman = to_thp_manager(man);
-
-	spin_lock(&rman->lock);
-	drm_mm_print(&rman->mm, printer);
-	spin_unlock(&rman->lock);
-}
-
-static const struct ttm_resource_manager_func vmw_thp_func = {
-	.alloc = vmw_thp_get_node,
-	.free = vmw_thp_put_node,
-	.debug = vmw_thp_debug
-};