Commit 644b4930 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm/tegra/for-4.12-rc1' of git://anongit.freedesktop.org/tegra/linux into drm-next

drm/tegra: Changes for v4.12-rc1

This contains various fixes to the host1x driver as well as a plug for a
leak of kernel pointers to userspace.

A fairly big addition this time around is the Video Image Compositor (VIC)
support that can be used to accelerate some 2D and image compositing
operations.

Furthermore, the driver now supports FB modifiers, so we no longer need to
rely on a custom IOCTL to set them.
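
As a rough illustration of what this enables (not part of the series itself), userspace can now pass one of the Tegra tiling modifiers when creating a framebuffer instead of going through the driver-specific tiling IOCTL. A minimal sketch using libdrm, assuming a GEM buffer handle already exists and using the NV_FORMAT_MOD_TEGRA_TILED name introduced by this series:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

/* Hypothetical helper: wrap an existing GEM buffer into a framebuffer with
 * an explicit Tegra tiling modifier instead of using the tiling IOCTL. */
static int create_tiled_fb(int fd, uint32_t handle, uint32_t width,
                           uint32_t height, uint32_t pitch, uint32_t *fb_id)
{
    uint32_t handles[4] = { handle };
    uint32_t pitches[4] = { pitch };
    uint32_t offsets[4] = { 0 };
    uint64_t modifiers[4] = { NV_FORMAT_MOD_TEGRA_TILED };

    /* DRM_MODE_FB_MODIFIERS marks the modifier array as valid; the kernel
     * only accepts it because the driver sets allow_fb_modifiers. */
    return drmModeAddFB2WithModifiers(fd, width, height,
                                      DRM_FORMAT_XRGB8888, handles,
                                      pitches, offsets, modifiers,
                                      fb_id, DRM_MODE_FB_MODIFIERS);
}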

Finally, this contains a few preparatory patches for Tegra186 support,
which unfortunately didn't quite make it this time but will hopefully be
ready for v4.13.

* tag 'drm/tegra/for-4.12-rc1' of git://anongit.freedesktop.org/tegra/linux:
  gpu: host1x: Fix host1x driver shutdown
  gpu: host1x: Support module reset
  gpu: host1x: Sort includes alphabetically
  drm/tegra: Add VIC support
  dt-bindings: Add bindings for the Tegra VIC
  drm/tegra: Add falcon helper library
  drm/tegra: Add Tegra DRM allocation API
  drm/tegra: Add tiling FB modifiers
  drm/tegra: Don't leak kernel pointer to userspace
  drm/tegra: Protect IOMMU operations by mutex
  drm/tegra: Enable IOVA API when IOMMU support is enabled
  gpu: host1x: Add IOMMU support
  gpu: host1x: Fix potential out-of-bounds access
  iommu/iova: Fix compile error with CONFIG_IOMMU_IOVA=m
  iommu: Add dummy implementations for !IOMMU_IOVA
  MAINTAINERS: Add related headers to IOMMU section
  iommu/iova: Consolidate code for adding new node to iovad domain rbtree
parents 8b03d1ed b0d36daa
@@ -249,6 +249,19 @@ of the following host1x client modules:
See ../pinctrl/nvidia,tegra124-dpaux-padctl.txt for information
regarding the DPAUX pad controller bindings.
- vic: Video Image Compositor
- compatible : "nvidia,tegra<chip>-vic"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- clocks: Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- vic: clock input for the VIC hardware
- resets: Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: Must include the following entries:
- vic
Example:
/ {
...
@@ -6780,6 +6780,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
S: Maintained
F: Documentation/devicetree/bindings/iommu/
F: drivers/iommu/
F: include/linux/iommu.h
F: include/linux/iova.h
IP MASQUERADING
M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
...
@@ -7,6 +7,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
select IOMMU_IOVA if IOMMU_SUPPORT
help
Choose this option if you have an NVIDIA Tegra SoC.
...
@@ -13,6 +13,8 @@ tegra-drm-y := \
sor.o \
dpaux.o \
gr2d.o \
- gr3d.o
gr3d.o \
falcon.o \
vic.o
obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
/*
* Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <drm/drm_atomic.h>
@@ -23,8 +25,11 @@
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define CARVEOUT_SZ SZ_64M
struct tegra_drm_file {
- struct list_head contexts;
struct idr contexts;
struct mutex lock;
};
static void tegra_atomic_schedule(struct tegra_drm *tegra,
@@ -126,8 +131,9 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
return -ENOMEM;
if (iommu_present(&platform_bus_type)) {
u64 carveout_start, carveout_end, gem_start, gem_end;
struct iommu_domain_geometry *geometry;
- u64 start, end;
unsigned long order;
tegra->domain = iommu_domain_alloc(&platform_bus_type);
if (!tegra->domain) {
@@ -136,12 +142,26 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
}
geometry = &tegra->domain->geometry;
- start = geometry->aperture_start;
gem_start = geometry->aperture_start;
- end = geometry->aperture_end;
gem_end = geometry->aperture_end - CARVEOUT_SZ;
carveout_start = gem_end + 1;
- DRM_DEBUG_DRIVER("IOMMU aperture initialized (%#llx-%#llx)\n",
carveout_end = geometry->aperture_end;
- start, end);
- drm_mm_init(&tegra->mm, start, end - start + 1);
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
carveout_start >> order,
carveout_end >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
mutex_init(&tegra->mm_lock);
DRM_DEBUG("IOMMU apertures:\n");
DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
carveout_end);
}
mutex_init(&tegra->clients_lock);
@@ -161,6 +181,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.funcs = &tegra_drm_mode_funcs;
err = tegra_drm_fb_prepare(drm);
@@ -208,6 +230,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
mutex_destroy(&tegra->mm_lock);
put_iova_domain(&tegra->carveout.domain);
}
free:
kfree(tegra);
@@ -232,6 +256,8 @@ static void tegra_drm_unload(struct drm_device *drm)
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
mutex_destroy(&tegra->mm_lock);
put_iova_domain(&tegra->carveout.domain);
}
kfree(tegra);
@@ -245,7 +271,8 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
if (!fpriv)
return -ENOMEM;
- INIT_LIST_HEAD(&fpriv->contexts);
idr_init(&fpriv->contexts);
mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
return 0;
@@ -424,21 +451,16 @@ int tegra_drm_submit(struct tegra_drm_context *context,
#ifdef CONFIG_DRM_TEGRA_STAGING
- static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
static struct tegra_drm_context *
- {
tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
- return (struct tegra_drm_context *)(uintptr_t)context;
- }
- static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
- struct tegra_drm_context *context)
{
- struct tegra_drm_context *ctx;
struct tegra_drm_context *context;
- list_for_each_entry(ctx, &file->contexts, list)
mutex_lock(&file->lock);
- if (ctx == context)
context = idr_find(&file->contexts, id);
- return true;
mutex_unlock(&file->lock);
- return false;
return context;
}
static int tegra_gem_create(struct drm_device *drm, void *data,
@@ -519,6 +541,28 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
&args->value);
}
static int tegra_client_open(struct tegra_drm_file *fpriv,
struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
int err;
err = client->ops->open_channel(client, context);
if (err < 0)
return err;
err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
if (err < 0) {
client->ops->close_channel(context);
return err;
}
context->client = client;
context->id = err;
return 0;
}
static int tegra_open_channel(struct drm_device *drm, void *data,
struct drm_file *file)
{
@@ -533,19 +577,22 @@ static int tegra_open_channel(struct drm_device *drm, void *data,
if (!context)
return -ENOMEM;
mutex_lock(&fpriv->lock);
list_for_each_entry(client, &tegra->clients, list)
if (client->base.class == args->client) {
- err = client->ops->open_channel(client, context);
err = tegra_client_open(fpriv, client, context);
- if (err)
if (err < 0)
break;
- list_add(&context->list, &fpriv->contexts);
args->context = context->id;
- args->context = (uintptr_t)context;
break;
- context->client = client;
- return 0;
}
if (err < 0)
kfree(context);
mutex_unlock(&fpriv->lock);
return err;
}
@@ -555,16 +602,22 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_close_channel *args = data;
struct tegra_drm_context *context;
int err = 0;
- context = tegra_drm_get_context(args->context);
mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
context = tegra_drm_file_get_context(fpriv, args->context);
- return -EINVAL;
if (!context) {
err = -EINVAL;
goto unlock;
}
- list_del(&context->list);
idr_remove(&fpriv->contexts, context->id);
tegra_drm_context_free(context);
- return 0;
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_get_syncpt(struct drm_device *drm, void *data,
@@ -574,19 +627,27 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
struct drm_tegra_get_syncpt *args = data;
struct tegra_drm_context *context;
struct host1x_syncpt *syncpt;
int err = 0;
- context = tegra_drm_get_context(args->context);
mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
context = tegra_drm_file_get_context(fpriv, args->context);
- return -ENODEV;
if (!context) {
err = -ENODEV;
goto unlock;
}
- if (args->index >= context->client->base.num_syncpts)
if (args->index >= context->client->base.num_syncpts) {
- return -EINVAL;
err = -EINVAL;
goto unlock;
}
syncpt = context->client->base.syncpts[args->index];
args->id = host1x_syncpt_id(syncpt);
- return 0;
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_submit(struct drm_device *drm, void *data,
@@ -595,13 +656,21 @@ static int tegra_submit(struct drm_device *drm, void *data,
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_submit *args = data;
struct tegra_drm_context *context;
int err;
mutex_lock(&fpriv->lock);
- context = tegra_drm_get_context(args->context);
context = tegra_drm_file_get_context(fpriv, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
}
- if (!tegra_drm_file_owns_context(fpriv, context))
err = context->client->ops->submit(context, args, drm, file);
- return -ENODEV;
- return context->client->ops->submit(context, args, drm, file);
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
@@ -612,24 +681,34 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
struct tegra_drm_context *context;
struct host1x_syncpt_base *base;
struct host1x_syncpt *syncpt;
int err = 0;
- context = tegra_drm_get_context(args->context);
mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
context = tegra_drm_file_get_context(fpriv, args->context);
- return -ENODEV;
if (!context) {
err = -ENODEV;
goto unlock;
}
- if (args->syncpt >= context->client->base.num_syncpts)
if (args->syncpt >= context->client->base.num_syncpts) {
- return -EINVAL;
err = -EINVAL;
goto unlock;
}
syncpt = context->client->base.syncpts[args->syncpt];
base = host1x_syncpt_get_base(syncpt);
- if (!base)
if (!base) {
- return -ENXIO;
err = -ENXIO;
goto unlock;
}
args->id = host1x_syncpt_base_id(base);
- return 0;
unlock:
mutex_unlock(&fpriv->lock);
return err;
}
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
@@ -804,14 +883,25 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
struct tegra_drm_context *context = p;
tegra_drm_context_free(context);
return 0;
}
static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
- struct tegra_drm_context *context, *tmp;
- list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
mutex_lock(&fpriv->lock);
- tegra_drm_context_free(context);
idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
mutex_unlock(&fpriv->lock);
idr_destroy(&fpriv->contexts);
mutex_destroy(&fpriv->lock);
kfree(fpriv);
}
@@ -844,7 +934,9 @@ static int tegra_debugfs_iova(struct seq_file *s, void *data)
struct tegra_drm *tegra = drm->dev_private;
struct drm_printer p = drm_seq_file_printer(s);
mutex_lock(&tegra->mm_lock);
drm_mm_print(&tegra->mm, &p);
mutex_unlock(&tegra->mm_lock);
return 0;
}
@@ -919,6 +1011,84 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
dma_addr_t *dma)
{
struct iova *alloc;
void *virt;
gfp_t gfp;
int err;
if (tegra->domain)
size = iova_align(&tegra->carveout.domain, size);
else
size = PAGE_ALIGN(size);
gfp = GFP_KERNEL | __GFP_ZERO;
if (!tegra->domain) {
/*
* Many units only support 32-bit addresses, even on 64-bit
* SoCs. If there is no IOMMU to translate into a 32-bit IO
* virtual address space, force allocations to be in the
* lower 32-bit range.
*/
gfp |= GFP_DMA;
}
virt = (void *)__get_free_pages(gfp, get_order(size));
if (!virt)
return ERR_PTR(-ENOMEM);
if (!tegra->domain) {
/*
* If IOMMU is disabled, devices address physical memory
* directly.
*/
*dma = virt_to_phys(virt);
return virt;
}
alloc = alloc_iova(&tegra->carveout.domain,
size >> tegra->carveout.shift,
tegra->carveout.limit, true);
if (!alloc) {
err = -EBUSY;
goto free_pages;
}
*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
size, IOMMU_READ | IOMMU_WRITE);
if (err < 0)
goto free_iova;
return virt;
free_iova:
__free_iova(&tegra->carveout.domain, alloc);
free_pages:
free_pages((unsigned long)virt, get_order(size));
return ERR_PTR(err);
}
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
dma_addr_t dma)
{
if (tegra->domain)
size = iova_align(&tegra->carveout.domain, size);
else
size = PAGE_ALIGN(size);
if (tegra->domain) {
iommu_unmap(tegra->domain, dma, size);
free_iova(&tegra->carveout.domain,
iova_pfn(&tegra->carveout.domain, dma));
}
free_pages((unsigned long)virt, get_order(size));
}
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
@@ -1003,11 +1173,13 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra124-sor", },
{ .compatible = "nvidia,tegra124-hdmi", },
{ .compatible = "nvidia,tegra124-dsi", },
{ .compatible = "nvidia,tegra124-vic", },
{ .compatible = "nvidia,tegra132-dsi", },
{ .compatible = "nvidia,tegra210-dc", },
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
{ /* sentinel */ }
};
@@ -1029,6 +1201,7 @@ static struct platform_driver * const drivers[] = {
&tegra_sor_driver,
&tegra_gr2d_driver,
&tegra_gr3d_driver,
&tegra_vic_driver,
};
static int __init host1x_drm_init(void)
...
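
To put the context-handle change above in perspective, here is a minimal userspace sketch (hypothetical, against the staging UAPI from <uapi/drm/tegra_drm.h>; the 0x5d class value is assumed to match HOST1X_CLASS_VIC) showing that the value returned in args.context is now an opaque per-file ID rather than a raw kernel pointer:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <tegra_drm.h> /* libdrm copy of the staging Tegra UAPI */

/* Hypothetical helper: open a channel for the VIC class and return the
 * opaque context handle allocated from the kernel's per-file IDR. */
static int open_vic_channel(int fd, uint64_t *context)
{
    struct drm_tegra_open_channel args;
    int err;

    memset(&args, 0, sizeof(args));
    args.client = 0x5d; /* host1x class of the VIC engine */

    err = drmIoctl(fd, DRM_IOCTL_TEGRA_OPEN_CHANNEL, &args);
    if (err < 0)
        return err;

    *context = args.context; /* small ID, only meaningful on this fd */
    return 0;
}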
@@ -12,6 +12,7 @@
#include <uapi/drm/tegra_drm.h>
#include <linux/host1x.h>
#include <linux/iova.h>
#include <linux/of_gpio.h>
#include <drm/drmP.h>
@@ -42,8 +43,15 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
struct {
struct iova_domain domain;
unsigned long shift;
unsigned long limit;
} carveout;
struct mutex clients_lock;
struct list_head clients;
@@ -67,7 +75,7 @@ struct tegra_drm_client;
struct tegra_drm_context {
struct tegra_drm_client *client;
struct host1x_channel *channel;
- struct list_head list;
unsigned int id;
};
struct tegra_drm_client_ops {
@@ -105,6 +113,10 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
dma_addr_t iova);
struct tegra_dc_soc_info;
struct tegra_output;
@@ -283,5 +295,6 @@ extern struct platform_driver tegra_dpaux_driver;
extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
extern struct platform_driver tegra_vic_driver;
#endif /* HOST1X_DRM_H */
/*
* Copyright (c) 2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/iopoll.h>
#include "falcon.h"
#include "drm.h"
enum falcon_memory {
FALCON_MEMORY_IMEM,
FALCON_MEMORY_DATA,
};
static void falcon_writel(struct falcon *falcon, u32 value, u32 offset)
{
writel(value, falcon->regs + offset);
}
int falcon_wait_idle(struct falcon *falcon)
{
u32 value;
return readl_poll_timeout(falcon->regs + FALCON_IDLESTATE, value,
(value == 0), 10, 100000);
}
static int falcon_dma_wait_idle(struct falcon *falcon)
{
u32 value;
return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
(value & FALCON_DMATRFCMD_IDLE), 10, 100000);
}
static int falcon_copy_chunk(struct falcon *falcon,
phys_addr_t base,
unsigned long offset,
enum falcon_memory target)
{
u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
return falcon_dma_wait_idle(falcon);
}
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
u32 *firmware_vaddr = falcon->firmware.vaddr;
dma_addr_t daddr;
size_t i;
int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
/* ensure that caches are flushed and falcon can see the firmware */
daddr = dma_map_single(falcon->dev, firmware_vaddr,
falcon->firmware.size, DMA_TO_DEVICE);
err = dma_mapping_error(falcon->dev, daddr);
if (err) {
dev_err(falcon->dev, "failed to map firmware: %d\n", err);
return;
}
dma_sync_single_for_device(falcon->dev, daddr,
falcon->firmware.size, DMA_TO_DEVICE);
dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
DMA_TO_DEVICE);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
if (bin->magic != PCI_VENDOR_ID_NVIDIA) {
dev_err(falcon->dev, "incorrect firmware magic\n");
return -EINVAL;
}
/* currently only version 1 is supported */
if (bin->version != 1) {
dev_err(falcon->dev, "unsupported firmware version\n");
return -EINVAL;
}
/* check that the firmware size is consistent */
if (bin->size > falcon->firmware.size) {
dev_err(falcon->dev, "firmware image size inconsistency\n");
return -EINVAL;
}
os = falcon->firmware.vaddr + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
falcon->firmware.code.offset = os->code_offset;
falcon->firmware.code.size = os->code_size;
falcon->firmware.data.offset = os->data_offset;
falcon->firmware.data.size = os->data_size;
return 0;
}
int falcon_read_firmware(struct falcon *falcon, const char *name)
{
int err;
/* request_firmware prints error if it fails */
err = request_firmware(&falcon->firmware.firmware, name, falcon->dev);
if (err < 0)
return err;
return 0;
}
int falcon_load_firmware(struct falcon *falcon)
{
const struct firmware *firmware = falcon->firmware.firmware;
int err;
falcon->firmware.size = firmware->size;
/* allocate iova space for the firmware */
falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
&falcon->firmware.paddr);
if (!falcon->firmware.vaddr) {
dev_err(falcon->dev, "dma memory mapping failed\n");
return -ENOMEM;
}
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
/* parse the image data */
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
goto err_setup_firmware_image;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
err_setup_firmware_image:
falcon->ops->free(falcon, falcon->firmware.size,
falcon->firmware.paddr, falcon->firmware.vaddr);
return err;
}
int falcon_init(struct falcon *falcon)
{
/* check mandatory ops */
if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
return -EINVAL;
falcon->firmware.vaddr = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
if (falcon->firmware.firmware) {
release_firmware(falcon->firmware.firmware);
falcon->firmware.firmware = NULL;
}
if (falcon->firmware.vaddr) {
falcon->ops->free(falcon, falcon->firmware.size,
falcon->firmware.paddr,
falcon->firmware.vaddr);
falcon->firmware.vaddr = NULL;
}
}
int falcon_boot(struct falcon *falcon)
{
unsigned long offset;
int err;
if (!falcon->firmware.vaddr)
return -EINVAL;
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
falcon_writel(falcon, (falcon->firmware.paddr +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
/* copy the data segment into Falcon internal memory */
for (offset = 0; offset < falcon->firmware.data.size; offset += 256)
falcon_copy_chunk(falcon,
falcon->firmware.data.offset + offset,
offset, FALCON_MEMORY_DATA);
/* copy the first code segment into Falcon internal memory */
falcon_copy_chunk(falcon, falcon->firmware.code.offset,
0, FALCON_MEMORY_IMEM);
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
FALCON_IRQMSET_SWGEN1 |
FALCON_IRQMSET_SWGEN0 |
FALCON_IRQMSET_EXTERR |
FALCON_IRQMSET_HALT |
FALCON_IRQMSET_WDTMR,
FALCON_IRQMSET);
falcon_writel(falcon, FALCON_IRQDEST_EXT(0xff) |
FALCON_IRQDEST_SWGEN1 |
FALCON_IRQDEST_SWGEN0 |
FALCON_IRQDEST_EXTERR |
FALCON_IRQDEST_HALT,
FALCON_IRQDEST);
/* enable interface */
falcon_writel(falcon, FALCON_ITFEN_MTHDEN |
FALCON_ITFEN_CTXEN,
FALCON_ITFEN);
/* boot falcon */
falcon_writel(falcon, 0x00000000, FALCON_BOOTVEC);
falcon_writel(falcon, FALCON_CPUCTL_STARTCPU, FALCON_CPUCTL);
err = falcon_wait_idle(falcon);
if (err < 0) {
dev_err(falcon->dev, "Falcon boot failed due to timeout\n");
return err;
}
return 0;
}
void falcon_execute_method(struct falcon *falcon, u32 method, u32 data)
{
falcon_writel(falcon, method >> 2, FALCON_UCLASS_METHOD_OFFSET);
falcon_writel(falcon, data, FALCON_UCLASS_METHOD_DATA);
}
/*
* Copyright (c) 2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _FALCON_H_
#define _FALCON_H_
#include <linux/types.h>
#define FALCON_UCLASS_METHOD_OFFSET 0x00000040
#define FALCON_UCLASS_METHOD_DATA 0x00000044
#define FALCON_IRQMSET 0x00001010
#define FALCON_IRQMSET_WDTMR (1 << 1)
#define FALCON_IRQMSET_HALT (1 << 4)
#define FALCON_IRQMSET_EXTERR (1 << 5)
#define FALCON_IRQMSET_SWGEN0 (1 << 6)
#define FALCON_IRQMSET_SWGEN1 (1 << 7)
#define FALCON_IRQMSET_EXT(v) (((v) & 0xff) << 8)
#define FALCON_IRQDEST 0x0000101c
#define FALCON_IRQDEST_HALT (1 << 4)
#define FALCON_IRQDEST_EXTERR (1 << 5)
#define FALCON_IRQDEST_SWGEN0 (1 << 6)
#define FALCON_IRQDEST_SWGEN1 (1 << 7)
#define FALCON_IRQDEST_EXT(v) (((v) & 0xff) << 8)
#define FALCON_ITFEN 0x00001048
#define FALCON_ITFEN_CTXEN (1 << 0)
#define FALCON_ITFEN_MTHDEN (1 << 1)
#define FALCON_IDLESTATE 0x0000104c
#define FALCON_CPUCTL 0x00001100
#define FALCON_CPUCTL_STARTCPU (1 << 1)
#define FALCON_BOOTVEC 0x00001104
#define FALCON_DMACTL 0x0000110c
#define FALCON_DMACTL_DMEM_SCRUBBING (1 << 1)
#define FALCON_DMACTL_IMEM_SCRUBBING (1 << 2)
#define FALCON_DMATRFBASE 0x00001110
#define FALCON_DMATRFMOFFS 0x00001114
#define FALCON_DMATRFCMD 0x00001118
#define FALCON_DMATRFCMD_IDLE (1 << 1)
#define FALCON_DMATRFCMD_IMEM (1 << 4)
#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
#define FALCON_DMATRFFBOFFS 0x0000111c
struct falcon_fw_bin_header_v1 {
u32 magic; /* 0x10de */
u32 version; /* version of bin format (1) */
u32 size; /* entire image size including this header */
u32 os_header_offset;
u32 os_data_offset;
u32 os_size;
};
struct falcon_fw_os_app_v1 {
u32 offset;
u32 size;
};
struct falcon_fw_os_header_v1 {
u32 code_offset;
u32 code_size;
u32 data_offset;
u32 data_size;
};
struct falcon;
struct falcon_ops {
void *(*alloc)(struct falcon *falcon, size_t size,
dma_addr_t *paddr);
void (*free)(struct falcon *falcon, size_t size,
dma_addr_t paddr, void *vaddr);
};
struct falcon_firmware_section {
unsigned long offset;
size_t size;
};
struct falcon_firmware {
/* Firmware after it is read but not loaded */
const struct firmware *firmware;
/* Raw firmware data */
dma_addr_t paddr;
void *vaddr;
size_t size;
/* Parsed firmware information */
struct falcon_firmware_section bin_data;
struct falcon_firmware_section data;
struct falcon_firmware_section code;
};
struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
const struct falcon_ops *ops;
void *data;
struct falcon_firmware firmware;
};
int falcon_init(struct falcon *falcon);
void falcon_exit(struct falcon *falcon);
int falcon_read_firmware(struct falcon *falcon, const char *firmware_name);
int falcon_load_firmware(struct falcon *falcon);
int falcon_boot(struct falcon *falcon);
void falcon_execute_method(struct falcon *falcon, u32 method, u32 data);
int falcon_wait_idle(struct falcon *falcon);
#endif /* _FALCON_H_ */
@@ -52,9 +52,26 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
uint64_t modifier = fb->base.modifier;
switch (fourcc_mod_tegra_mod(modifier)) {
case NV_FORMAT_MOD_TEGRA_TILED:
tiling->mode = TEGRA_BO_TILING_MODE_TILED;
tiling->value = 0;
break;
case NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(0):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
tiling->value = fourcc_mod_tegra_param(modifier);
if (tiling->value > 5)
return -EINVAL;
break;
default:
/* TODO: handle YUV formats? */
*tiling = fb->planes[0]->tiling;
break;
}
return 0;
}
...
@@ -128,12 +128,14 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
if (!bo->mm)
return -ENOMEM;
mutex_lock(&tegra->mm_lock);
err = drm_mm_insert_node_generic(&tegra->mm,
bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
if (err < 0) {
dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
err);
- goto free;
goto unlock;
}
bo->paddr = bo->mm->start;
@@ -147,11 +149,14 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
bo->size = err;
mutex_unlock(&tegra->mm_lock);
return 0;
remove:
drm_mm_remove_node(bo->mm);
- free:
unlock:
mutex_unlock(&tegra->mm_lock);
kfree(bo->mm);
return err;
}
@@ -161,8 +166,11 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
if (!bo->mm)
return 0;
mutex_lock(&tegra->mm_lock);
iommu_unmap(tegra->domain, bo->paddr, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
kfree(bo->mm);
return 0;
...
/*
* Copyright (c) 2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
#include "drm.h"
#include "falcon.h"
#include "vic.h"
struct vic_config {
const char *firmware;
};
struct vic {
struct falcon falcon;
bool booted;
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
/* Platform configuration */
const struct vic_config *config;
};
static inline struct vic *to_vic(struct tegra_drm_client *client)
{
return container_of(client, struct vic, client);
}
static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
writel(value, vic->regs + offset);
}
static int vic_runtime_resume(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
return clk_prepare_enable(vic->clk);
}
static int vic_runtime_suspend(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
clk_disable_unprepare(vic->clk);
vic->booted = false;
return 0;
}
static int vic_boot(struct vic *vic)
{
u32 fce_ucode_size, fce_bin_data_offset;
void *hdr;
int err = 0;
if (vic->booted)
return 0;
/* setup clockgating registers */
vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
CG_IDLE_CG_EN |
CG_WAKEUP_DLY_CNT(4),
NV_PVIC_MISC_PRI_VIC_CG);
err = falcon_boot(&vic->falcon);
if (err < 0)
return err;
hdr = vic->falcon.firmware.vaddr;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
hdr = vic->falcon.firmware.vaddr +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
(vic->falcon.firmware.paddr + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
if (err < 0) {
dev_err(vic->dev,
"failed to set application ID and FCE base\n");
return err;
}
vic->booted = true;
return 0;
}
static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
dma_addr_t *iova)
{
struct tegra_drm *tegra = falcon->data;
return tegra_drm_alloc(tegra, size, iova);
}
static void vic_falcon_free(struct falcon *falcon, size_t size,
dma_addr_t iova, void *va)
{
struct tegra_drm *tegra = falcon->data;
return tegra_drm_free(tegra, size, va, iova);
}
static const struct falcon_ops vic_falcon_ops = {
.alloc = vic_falcon_alloc,
.free = vic_falcon_free
};
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
if (tegra->domain) {
err = iommu_attach_device(tegra->domain, vic->dev);
if (err < 0) {
dev_err(vic->dev, "failed to attach to domain: %d\n",
err);
return err;
}
vic->domain = tegra->domain;
}
if (!vic->falcon.data) {
vic->falcon.data = tegra;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto detach_device;
}
vic->channel = host1x_channel_request(client->dev);
if (!vic->channel) {
err = -ENOMEM;
goto detach_device;
}
client->syncpts[0] = host1x_syncpt_request(client->dev, 0);
if (!client->syncpts[0]) {
err = -ENOMEM;
goto free_channel;
}
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
goto free_syncpt;
return 0;
free_syncpt:
host1x_syncpt_free(client->syncpts[0]);
free_channel:
host1x_channel_free(vic->channel);
detach_device:
if (tegra->domain)
iommu_detach_device(tegra->domain, vic->dev);
return err;
}
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_free(vic->channel);
if (vic->domain) {
iommu_detach_device(vic->domain, vic->dev);
vic->domain = NULL;
}
return 0;
}
static const struct host1x_client_ops vic_client_ops = {
.init = vic_init,
.exit = vic_exit,
};
static int vic_open_channel(struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
struct vic *vic = to_vic(client);
int err;
err = pm_runtime_get_sync(vic->dev);
if (err < 0)
return err;
err = vic_boot(vic);
if (err < 0) {
pm_runtime_put(vic->dev);
return err;
}
context->channel = host1x_channel_get(vic->channel);
if (!context->channel) {
pm_runtime_put(vic->dev);
return -ENOMEM;
}
return 0;
}
static void vic_close_channel(struct tegra_drm_context *context)
{
struct vic *vic = to_vic(context->client);
host1x_channel_put(context->channel);
pm_runtime_put(vic->dev);
}
static const struct tegra_drm_client_ops vic_ops = {
.open_channel = vic_open_channel,
.close_channel = vic_close_channel,
.submit = tegra_drm_submit,
};
static const struct vic_config vic_t124_config = {
.firmware = "nvidia/tegra124/vic03_ucode.bin",
};
static const struct vic_config vic_t210_config = {
.firmware = "nvidia/tegra210/vic04_ucode.bin",
};
static const struct of_device_id vic_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ },
};
static int vic_probe(struct platform_device *pdev)
{
struct vic_config *vic_config = NULL;
struct device *dev = &pdev->dev;
struct host1x_syncpt **syncpts;
struct resource *regs;
const struct of_device_id *match;
struct vic *vic;
int err;
match = of_match_device(vic_match, dev);
vic_config = (struct vic_config *)match->data;
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(&pdev->dev, "failed to get registers\n");
return -ENXIO;
}
vic->regs = devm_ioremap_resource(dev, regs);
if (IS_ERR(vic->regs))
return PTR_ERR(vic->regs);
vic->clk = devm_clk_get(dev, NULL);
if (IS_ERR(vic->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(vic->clk);
}
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)
return err;
err = falcon_read_firmware(&vic->falcon, vic_config->firmware);
if (err < 0)
goto exit_falcon;
platform_set_drvdata(pdev, vic);
INIT_LIST_HEAD(&vic->client.base.list);
vic->client.base.ops = &vic_client_ops;
vic->client.base.dev = dev;
vic->client.base.class = HOST1X_CLASS_VIC;
vic->client.base.syncpts = syncpts;
vic->client.base.num_syncpts = 1;
vic->dev = dev;
vic->config = vic_config;
INIT_LIST_HEAD(&vic->client.list);
vic->client.ops = &vic_ops;
err = host1x_client_register(&vic->client.base);
if (err < 0) {
dev_err(dev, "failed to register host1x client: %d\n", err);
platform_set_drvdata(pdev, NULL);
goto exit_falcon;
}
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
err = vic_runtime_resume(&pdev->dev);
if (err < 0)
goto unregister_client;
}
return 0;
unregister_client:
host1x_client_unregister(&vic->client.base);
exit_falcon:
falcon_exit(&vic->falcon);
return err;
}
static int vic_remove(struct platform_device *pdev)
{
struct vic *vic = platform_get_drvdata(pdev);
int err;
err = host1x_client_unregister(&vic->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
err);
return err;
}
if (pm_runtime_enabled(&pdev->dev))
pm_runtime_disable(&pdev->dev);
else
vic_runtime_suspend(&pdev->dev);
falcon_exit(&vic->falcon);
return 0;
}
static const struct dev_pm_ops vic_pm_ops = {
SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
};
struct platform_driver tegra_vic_driver = {
.driver = {
.name = "tegra-vic",
.of_match_table = vic_match,
.pm = &vic_pm_ops
},
.probe = vic_probe,
.remove = vic_remove,
};
/*
* Copyright (c) 2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef TEGRA_VIC_H
#define TEGRA_VIC_H
/* VIC methods */
#define VIC_SET_APPLICATION_ID 0x00000200
#define VIC_SET_FCE_UCODE_SIZE 0x0000071C
#define VIC_SET_FCE_UCODE_OFFSET 0x0000072C
/* VIC registers */
#define NV_PVIC_MISC_PRI_VIC_CG 0x000016d0
#define CG_IDLE_CG_DLY_CNT(val) ((val & 0x3f) << 0)
#define CG_IDLE_CG_EN (1 << 6)
#define CG_WAKEUP_DLY_CNT(val) ((val & 0xf) << 16)
/* Firmware offsets */
#define VIC_UCODE_FCE_HEADER_OFFSET (6*4)
#define VIC_UCODE_FCE_DATA_OFFSET (7*4)
#define FCE_UCODE_SIZE_OFFSET (2*4)
#endif /* TEGRA_VIC_H */
@@ -267,37 +267,6 @@ static int host1x_device_match(struct device *dev, struct device_driver *drv)
return strcmp(dev_name(dev), drv->name) == 0;
}
- static int host1x_device_probe(struct device *dev)
- {
- struct host1x_driver *driver = to_host1x_driver(dev->driver);
- struct host1x_device *device = to_host1x_device(dev);
- if (driver->probe)
- return driver->probe(device);
- return 0;
- }
- static int host1x_device_remove(struct device *dev)
- {
- struct host1x_driver *driver = to_host1x_driver(dev->driver);
- struct host1x_device *device = to_host1x_device(dev);
- if (driver->remove)
- return driver->remove(device);
- return 0;
- }
- static void host1x_device_shutdown(struct device *dev)
- {
- struct host1x_driver *driver = to_host1x_driver(dev->driver);
- struct host1x_device *device = to_host1x_device(dev);
- if (driver->shutdown)
- driver->shutdown(device);
- }
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@@ -310,9 +279,6 @@ static const struct dev_pm_ops host1x_device_pm_ops = {
struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
- .probe = host1x_device_probe,
- .remove = host1x_device_remove,
- .shutdown = host1x_device_shutdown,
.pm = &host1x_device_pm_ops,
};
@@ -516,6 +482,37 @@ int host1x_unregister(struct host1x *host1x)
return 0;
}
static int host1x_device_probe(struct device *dev)
{
struct host1x_driver *driver = to_host1x_driver(dev->driver);
struct host1x_device *device = to_host1x_device(dev);
if (driver->probe)
return driver->probe(device);
return 0;
}
static int host1x_device_remove(struct device *dev)
{
struct host1x_driver *driver = to_host1x_driver(dev->driver);
struct host1x_device *device = to_host1x_device(dev);
if (driver->remove)
return driver->remove(device);
return 0;
}
static void host1x_device_shutdown(struct device *dev)
{
struct host1x_driver *driver = to_host1x_driver(dev->driver);
struct host1x_device *device = to_host1x_device(dev);
if (driver->shutdown)
driver->shutdown(device);
}
int host1x_driver_register_full(struct host1x_driver *driver,
struct module *owner)
{
@@ -536,6 +533,9 @@ int host1x_driver_register_full(struct host1x_driver *driver,
driver->driver.bus = &host1x_bus_type;
driver->driver.owner = owner;
driver->driver.probe = host1x_device_probe;
driver->driver.remove = host1x_device_remove;
driver->driver.shutdown = host1x_device_shutdown;
return driver_register(&driver->driver);
}
...
@@ -51,9 +51,15 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
struct host1x_cdma *cdma = pb_to_cdma(pb);
struct host1x *host1x = cdma_to_host1x(cdma);
- if (pb->phys != 0)
if (!pb->phys)
- dma_free_wc(host1x->dev, pb->size_bytes + 4, pb->mapped,
return;
- pb->phys);
if (host1x->domain) {
iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
}
dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
pb->mapped = NULL;
pb->phys = 0;
@@ -66,28 +72,64 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
{
struct host1x_cdma *cdma = pb_to_cdma(pb);
struct host1x *host1x = cdma_to_host1x(cdma);
struct iova *alloc;
u32 size;
int err;
pb->mapped = NULL;
pb->phys = 0;
- pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;
size = pb->size + 4;
/* initialize buffer pointers */
- pb->fence = pb->size_bytes - 8;
pb->fence = pb->size - 8;
pb->pos = 0;
- /* allocate and map pushbuffer memory */
if (host1x->domain) {
- pb->mapped = dma_alloc_wc(host1x->dev, pb->size_bytes + 4, &pb->phys,
unsigned long shift;
size = iova_align(&host1x->iova, size);
pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
GFP_KERNEL);
if (!pb->mapped)
- goto fail;
return -ENOMEM;
shift = iova_shift(&host1x->iova);
alloc = alloc_iova(&host1x->iova, size >> shift,
host1x->iova_end >> shift, true);
if (!alloc) {
err = -ENOMEM;
goto iommu_free_mem;
}
pb->dma = iova_dma_addr(&host1x->iova, alloc);
err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
IOMMU_READ);
if (err)
goto iommu_free_iova;
} else {
pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
GFP_KERNEL);
if (!pb->mapped)
return -ENOMEM;
pb->dma = pb->phys;
}
pb->alloc_size = size;
host1x_hw_pushbuffer_init(host1x, pb);
return 0;
- fail:
iommu_free_iova:
- host1x_pushbuffer_destroy(pb);
__free_iova(&host1x->iova, alloc);
- return -ENOMEM;
iommu_free_mem:
dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
return err;
}
/*
@@ -101,7 +143,7 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
WARN_ON(pb->pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
- pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
pb->pos = (pb->pos + 8) & (pb->size - 1);
}
/*
@@ -111,7 +153,7 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
{
/* Advance the next write position */
- pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
pb->fence = (pb->fence + slots * 8) & (pb->size - 1);
}
/*
@@ -119,7 +161,7 @@ static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
*/
static u32 host1x_pushbuffer_space(struct push_buffer *pb)
{
- return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
return ((pb->fence - pb->pos) & (pb->size - 1)) / 8;
}
/*
...
@@ -43,10 +43,12 @@ struct host1x_job;
struct push_buffer {
void *mapped; /* mapped pushbuffer memory */
- dma_addr_t phys; /* physical address of pushbuffer */
dma_addr_t dma; /* device address of pushbuffer */
phys_addr_t phys; /* physical address of pushbuffer */
u32 fence; /* index we've written */
u32 pos; /* index to write to */
- u32 size_bytes;
u32 size;
u32 alloc_size;
};
struct buffer_timeout {
...
@@ -16,23 +16,25 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
- #include <linux/module.h>
- #include <linux/list.h>
- #include <linux/slab.h>
- #include <linux/of.h>
- #include <linux/of_device.h>
#include <linux/clk.h>
- #include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
#include "bus.h"
- #include "dev.h"
- #include "intr.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"
#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
@@ -168,22 +170,56 @@ static int host1x_probe(struct platform_device *pdev)
return err;
}
host->rst = devm_reset_control_get(&pdev->dev, "host1x");
if (IS_ERR(host->rst)) {
err = PTR_ERR(host->clk);
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
if (iommu_present(&platform_bus_type)) {
struct iommu_domain_geometry *geometry;
unsigned long order;
host->domain = iommu_domain_alloc(&platform_bus_type);
if (!host->domain)
return -ENOMEM;
err = iommu_attach_device(host->domain, &pdev->dev);
if (err)
goto fail_free_domain;
geometry = &host->domain->geometry;
order = __ffs(host->domain->pgsize_bitmap);
init_iova_domain(&host->iova, 1UL << order,
geometry->aperture_start >> order,
geometry->aperture_end >> order);
host->iova_end = geometry->aperture_end;
}
err = host1x_channel_list_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
- return err;
goto fail_detach_device;
}
err = clk_prepare_enable(host->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock\n");
- return err;
goto fail_detach_device;
}
err = reset_control_deassert(host->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
goto fail_unprepare_disable;
} }
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto fail_unprepare_disable;
goto fail_reset_assert;
}
err = host1x_intr_init(host, syncpt_irq);
@@ -204,8 +240,19 @@ static int host1x_probe(struct platform_device *pdev)
host1x_intr_deinit(host);
fail_deinit_syncpt:
host1x_syncpt_deinit(host);
fail_reset_assert:
reset_control_assert(host->rst);
fail_unprepare_disable:
clk_disable_unprepare(host->clk);
fail_detach_device:
if (host->domain) {
put_iova_domain(&host->iova);
iommu_detach_device(host->domain, &pdev->dev);
}
fail_free_domain:
if (host->domain)
iommu_domain_free(host->domain);
return err;
}
@@ -216,8 +263,15 @@ static int host1x_remove(struct platform_device *pdev)
host1x_unregister(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
if (host->domain) {
put_iova_domain(&host->iova);
iommu_detach_device(host->domain, &pdev->dev);
iommu_domain_free(host->domain);
}
return 0;
}
...
@@ -17,14 +17,17 @@
#ifndef HOST1X_DEV_H
#define HOST1X_DEV_H
- #include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "cdma.h"
#include "channel.h" #include "channel.h"
#include "syncpt.h"
#include "intr.h" #include "intr.h"
#include "cdma.h"
#include "job.h" #include "job.h"
#include "syncpt.h"
struct host1x_syncpt; struct host1x_syncpt;
struct host1x_syncpt_base; struct host1x_syncpt_base;
...@@ -107,6 +110,11 @@ struct host1x { ...@@ -107,6 +110,11 @@ struct host1x {
struct host1x_syncpt_base *bases; struct host1x_syncpt_base *bases;
struct device *dev; struct device *dev;
struct clk *clk; struct clk *clk;
struct reset_control *rst;
struct iommu_domain *domain;
struct iova_domain iova;
dma_addr_t iova_end;
struct mutex intr_mutex;
int intr_syncpt_irq;
...
@@ -30,7 +30,7 @@
*/
static void push_buffer_init(struct push_buffer *pb)
{
-*(u32 *)(pb->mapped + pb->size_bytes) = host1x_opcode_restart(0);
+*(u32 *)(pb->mapped + pb->size) = host1x_opcode_restart(0);
}
/*
@@ -55,8 +55,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
*(p++) = HOST1X_OPCODE_NOP;
*(p++) = HOST1X_OPCODE_NOP;
dev_dbg(host1x->dev, "%s: NOP at %pad+%#x\n", __func__,
-&pb->phys, getptr);
-getptr = (getptr + 8) & (pb->size_bytes - 1);
+&pb->dma, getptr);
+getptr = (getptr + 8) & (pb->size - 1);
}
wmb();
@@ -78,10 +78,9 @@ static void cdma_start(struct host1x_cdma *cdma)
HOST1X_CHANNEL_DMACTRL);
/* set base, put and end pointer */
-host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+host1x_ch_writel(ch, cdma->push_buffer.dma, HOST1X_CHANNEL_DMASTART);
host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
-host1x_ch_writel(ch, cdma->push_buffer.phys +
-cdma->push_buffer.size_bytes + 4,
+host1x_ch_writel(ch, cdma->push_buffer.dma + cdma->push_buffer.size + 4,
HOST1X_CHANNEL_DMAEND);
/* reset GET */
@@ -115,9 +114,8 @@ static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
HOST1X_CHANNEL_DMACTRL);
/* set base, end pointer (all of memory) */
-host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
-host1x_ch_writel(ch, cdma->push_buffer.phys +
-cdma->push_buffer.size_bytes,
+host1x_ch_writel(ch, cdma->push_buffer.dma, HOST1X_CHANNEL_DMASTART);
+host1x_ch_writel(ch, cdma->push_buffer.dma + cdma->push_buffer.size,
HOST1X_CHANNEL_DMAEND);
/* set GET, by loading the value in PUT (then reset GET) */
......
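In the hunks above the push buffer keeps a 32-bit RESTART opcode just past its last payload byte (pb->mapped + pb->size), so cdma_start() programs DMAEND to dma + size + 4 so that the fetchable range still covers that word. A tiny stand-alone sketch of that arithmetic, with made-up address and size values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only. */
	uint64_t dma  = 0x80001000ULL;  /* assumed DMA/IOVA address of the push buffer */
	uint32_t size = 4096;           /* payload bytes, excluding the RESTART word */

	uint64_t restart_offset = size;      /* where push_buffer_init() writes */
	uint64_t dma_end = dma + size + 4;   /* what cdma_start() programs */

	/* The end pointer must cover the RESTART word, hence the "+ 4". */
	assert(dma + restart_offset + sizeof(uint32_t) == dma_end);
	printf("DMAEND = %#llx\n", (unsigned long long)dma_end);
	return 0;
}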
@@ -174,9 +174,10 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host,
return 0;
}
-static unsigned int pin_job(struct host1x_job *job)
+static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
unsigned int i;
int err;
job->num_unpins = 0;
@@ -186,12 +187,16 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
dma_addr_t phys_addr;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
-if (!reloc->target.bo)
+if (!reloc->target.bo) {
err = -EINVAL;
goto unpin;
}
phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
-if (!phys_addr)
+if (!phys_addr) {
err = -EINVAL;
goto unpin;
}
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -201,28 +206,67 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
for (i = 0; i < job->num_gathers; i++) {
struct host1x_job_gather *g = &job->gathers[i];
size_t gather_size = 0;
struct scatterlist *sg;
struct sg_table *sgt;
dma_addr_t phys_addr;
unsigned long shift;
struct iova *alloc;
unsigned int j;
g->bo = host1x_bo_get(g->bo);
-if (!g->bo)
+if (!g->bo) {
err = -EINVAL;
goto unpin;
}
phys_addr = host1x_bo_pin(g->bo, &sgt);
-if (!phys_addr)
+if (!phys_addr) {
err = -EINVAL;
goto unpin;
}
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
gather_size += sg->length;
gather_size = iova_align(&host->iova, gather_size);
shift = iova_shift(&host->iova);
alloc = alloc_iova(&host->iova, gather_size >> shift,
host->iova_end >> shift, true);
if (!alloc) {
err = -ENOMEM;
goto unpin;
}
err = iommu_map_sg(host->domain,
iova_dma_addr(&host->iova, alloc),
sgt->sgl, sgt->nents, IOMMU_READ);
if (err == 0) {
__free_iova(&host->iova, alloc);
err = -EINVAL;
goto unpin;
}
job->addr_phys[job->num_unpins] =
iova_dma_addr(&host->iova, alloc);
job->unpins[job->num_unpins].size = gather_size;
} else {
job->addr_phys[job->num_unpins] = phys_addr;
}
job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
}
-return job->num_unpins;
+return 0;
unpin:
host1x_job_unpin(job);
-return 0;
+return err;
}
static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
@@ -525,8 +569,8 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
host1x_syncpt_load(host->syncpt + i);
/* pin memory */
-err = pin_job(job);
-if (!err)
+err = pin_job(host, job);
+if (err)
goto out;
/* patch gathers */
@@ -572,11 +616,19 @@ EXPORT_SYMBOL(host1x_job_pin);
void host1x_job_unpin(struct host1x_job *job)
{
struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
unsigned int i;
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
iommu_unmap(host->domain, job->addr_phys[i],
unpin->size);
free_iova(&host->iova,
iova_pfn(&host->iova, job->addr_phys[i]));
}
host1x_bo_unpin(unpin->bo, unpin->sgt);
host1x_bo_put(unpin->bo);
}
......
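pin_job() above sizes each gather by summing the scatterlist segment lengths, rounds the total up to the IOVA granule, and allocates that many pages below host->iova_end before mapping with iommu_map_sg(). A rough user-space sketch of just the sizing step; the segment lengths, granule and shift are made-up values standing in for sg->length, iova_align() and iova_shift():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical scatterlist segment lengths, in bytes. */
	uint64_t seg_len[] = { 6000, 300, 9000 };
	uint64_t granule = 4096;     /* assumed IOVA granule */
	unsigned int shift = 12;     /* log2(granule), cf. iova_shift() */
	uint64_t gather_size = 0;

	for (unsigned int i = 0; i < sizeof(seg_len) / sizeof(seg_len[0]); i++)
		gather_size += seg_len[i];

	/* iova_align() rounds up to a whole number of IOVA pages. */
	gather_size = (gather_size + granule - 1) & ~(granule - 1);

	printf("aligned size: %llu bytes -> %llu pages\n",
	       (unsigned long long)gather_size,
	       (unsigned long long)(gather_size >> shift));
	return 0;
}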
@@ -44,6 +44,7 @@ struct host1x_waitchk {
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
size_t size;
};
/*
......
@@ -484,7 +484,7 @@ unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
-if (host->info->nb_pts < id)
+if (id >= host->info->nb_pts)
return NULL;
return host->syncpt + id;
......
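The syncpt.c change above fixes the potential out-of-bounds access called out in the merge: with nb_pts syncpoints the valid ids are 0 .. nb_pts - 1, but the old comparison let id == nb_pts through. A small stand-alone sketch of the two checks, with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: valid indices are 0 .. nb_pts - 1. */
static bool old_check_rejects(unsigned int nb_pts, unsigned int id)
{
	return nb_pts < id;      /* misses id == nb_pts, one past the end */
}

static bool new_check_rejects(unsigned int nb_pts, unsigned int id)
{
	return id >= nb_pts;     /* rejects everything past the last slot */
}

int main(void)
{
	unsigned int nb_pts = 32, id = 32;  /* one past the end */

	printf("old check rejects id=32: %d\n", old_check_rejects(nb_pts, id));
	printf("new check rejects id=32: %d\n", new_check_rejects(nb_pts, id));
	return 0;
}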
@@ -100,6 +100,34 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
}
}
/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
struct rb_node *start)
{
struct rb_node **new, *parent = NULL;
new = (start) ? &start : &(root->rb_node);
/* Figure out where to put new node */
while (*new) {
struct iova *this = rb_entry(*new, struct iova, node);
parent = *new;
if (iova->pfn_lo < this->pfn_lo)
new = &((*new)->rb_left);
else if (iova->pfn_lo > this->pfn_lo)
new = &((*new)->rb_right);
else {
WARN_ON(1); /* this should not happen */
return;
}
}
/* Add new node and rebalance tree. */
rb_link_node(&iova->node, parent, new);
rb_insert_color(&iova->node, root);
}
/*
* Computes the padding size required, to make the start address
* naturally aligned on the power-of-two order of its size
@@ -157,35 +185,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
new->pfn_lo = limit_pfn - (size + pad_size) + 1;
new->pfn_hi = new->pfn_lo + size - 1;
-/* Insert the new_iova into domain rbtree by holding writer lock */
-/* Add new node and rebalance tree. */
-{
-struct rb_node **entry, *parent = NULL;
-/* If we have 'prev', it's a valid place to start the
-insertion. Otherwise, start from the root. */
-if (prev)
-entry = &prev;
-else
-entry = &iovad->rbroot.rb_node;
-/* Figure out where to put new node */
-while (*entry) {
-struct iova *this = rb_entry(*entry, struct iova, node);
-parent = *entry;
-if (new->pfn_lo < this->pfn_lo)
-entry = &((*entry)->rb_left);
-else if (new->pfn_lo > this->pfn_lo)
-entry = &((*entry)->rb_right);
-else
-BUG(); /* this should not happen */
-}
-/* Add new node and rebalance tree. */
-rb_link_node(&new->node, parent, entry);
-rb_insert_color(&new->node, &iovad->rbroot);
-}
+/* If we have 'prev', it's a valid place to start the insertion. */
+iova_insert_rbtree(&iovad->rbroot, new, prev);
__cached_rbnode_insert_update(iovad, saved_pfn, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
@@ -194,28 +195,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
return 0;
}
-static void
-iova_insert_rbtree(struct rb_root *root, struct iova *iova)
-{
-struct rb_node **new = &(root->rb_node), *parent = NULL;
-/* Figure out where to put new node */
-while (*new) {
-struct iova *this = rb_entry(*new, struct iova, node);
-parent = *new;
-if (iova->pfn_lo < this->pfn_lo)
-new = &((*new)->rb_left);
-else if (iova->pfn_lo > this->pfn_lo)
-new = &((*new)->rb_right);
-else
-BUG(); /* this should not happen */
-}
-/* Add new node and rebalance tree. */
-rb_link_node(&iova->node, parent, new);
-rb_insert_color(&iova->node, root);
-}
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);
@@ -505,7 +484,7 @@ __insert_new_range(struct iova_domain *iovad,
iova = alloc_and_init_iova(pfn_lo, pfn_hi);
if (iova)
-iova_insert_rbtree(&iovad->rbroot, iova);
+iova_insert_rbtree(&iovad->rbroot, iova, NULL);
return iova;
}
@@ -612,11 +591,11 @@ split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
rb_erase(&iova->node, &iovad->rbroot);
if (prev) {
-iova_insert_rbtree(&iovad->rbroot, prev);
+iova_insert_rbtree(&iovad->rbroot, prev, NULL);
iova->pfn_lo = pfn_lo;
}
if (next) {
-iova_insert_rbtree(&iovad->rbroot, next);
+iova_insert_rbtree(&iovad->rbroot, next, NULL);
iova->pfn_hi = pfn_hi;
}
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
......
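The iova.c change consolidates two nearly identical rbtree-insertion loops into a single iova_insert_rbtree() helper that can optionally start descending from a caller-supplied node (prev) rather than the root. A toy sketch of that "insert with optional start hint" shape, using a plain binary search tree instead of the kernel's rbtree API (all names here are illustrative, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Toy BST node keyed like struct iova's pfn_lo. */
struct node {
	unsigned long pfn_lo;
	struct node *left, *right;
};

/* Insert 'n', optionally descending from '*start' instead of the root,
 * mirroring the hint the consolidated helper accepts from its caller. */
static void insert(struct node **root, struct node *n, struct node **start)
{
	struct node **link = start ? start : root;

	while (*link)
		link = (n->pfn_lo < (*link)->pfn_lo) ? &(*link)->left
						     : &(*link)->right;
	*link = n;
}

int main(void)
{
	struct node a = { 10 }, b = { 5 }, c = { 20 }, *root = NULL;

	insert(&root, &a, NULL);
	insert(&root, &b, NULL);
	insert(&root, &c, &root->right);   /* hinted insert below 'a' */
	printf("root=%lu left=%lu right=%lu\n",
	       root->pfn_lo, root->left->pfn_lo, root->right->pfn_lo);
	return 0;
}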
@@ -26,6 +26,7 @@ enum host1x_class {
HOST1X_CLASS_HOST1X = 0x1,
HOST1X_CLASS_GR2D = 0x51,
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
};
......
@@ -82,6 +82,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
@@ -106,5 +107,95 @@ void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
return -ENOTSUPP;
}
static inline void iova_cache_put(void)
{
}
static inline struct iova *alloc_iova_mem(void)
{
return NULL;
}
static inline void free_iova_mem(struct iova *iova)
{
}
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}
static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}
static inline struct iova *alloc_iova(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn,
bool size_aligned)
{
return NULL;
}
static inline void free_iova_fast(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size)
{
}
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn)
{
return 0;
}
static inline struct iova *reserve_iova(struct iova_domain *iovad,
unsigned long pfn_lo,
unsigned long pfn_hi)
{
return NULL;
}
static inline void copy_reserved_iova(struct iova_domain *from,
struct iova_domain *to)
{
}
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
unsigned long start_pfn,
unsigned long pfn_32bit)
{
}
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
return NULL;
}
static inline void put_iova_domain(struct iova_domain *iovad)
{
}
static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova,
unsigned long pfn_lo,
unsigned long pfn_hi)
{
return NULL;
}
static inline void free_cpu_cached_iovas(unsigned int cpu,
struct iova_domain *iovad)
{
}
#endif
#endif
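The new !CONFIG_IOMMU_IOVA stubs above return NULL or 0 so callers such as host1x still build when the IOVA allocator is compiled out; they only need to treat a NULL allocation as failure. A stand-alone sketch of that calling pattern, where the types and the always-NULL alloc_iova() are mock-ups rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* Mock-ups standing in for the kernel types and the !IOMMU_IOVA stub. */
struct iova { unsigned long pfn_lo, pfn_hi; };
struct iova_domain { int unused; };

static struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
			       unsigned long limit_pfn, int size_aligned)
{
	(void)iovad; (void)size; (void)limit_pfn; (void)size_aligned;
	return NULL;   /* mimics the dummy implementation above */
}

int main(void)
{
	struct iova_domain iovad = { 0 };
	struct iova *alloc = alloc_iova(&iovad, 16, 0xfffff, 1);

	if (!alloc) {
		fprintf(stderr, "no IOVA space (or IOVA support compiled out)\n");
		return 1;
	}
	return 0;
}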
@@ -306,6 +306,51 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
/* NVIDIA Tegra frame buffer modifiers */
/*
* Some modifiers take parameters, for example the number of vertical GOBs in
* a block. Reserve the lower 32 bits for parameters
*/
#define __fourcc_mod_tegra_mode_shift 32
#define fourcc_mod_tegra_code(val, params) \
fourcc_mod_code(NV, ((((__u64)val) << __fourcc_mod_tegra_mode_shift) | params))
#define fourcc_mod_tegra_mod(m) \
(m & ~((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
#define fourcc_mod_tegra_param(m) \
(m & ((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
/*
* Tegra Tiled Layout, used by Tegra 2, 3 and 4.
*
* Pixels are arranged in simple tiles of 16 x 16 bytes.
*/
#define NV_FORMAT_MOD_TEGRA_TILED fourcc_mod_tegra_code(1, 0)
/*
* Tegra 16Bx2 Block Linear layout, used by TK1/TX1
*
* Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
* vertically by a power of 2 (1 to 32 GOBs) to form a block.
*
* Within a GOB, data is ordered as 16B x 2 lines sectors laid in Z-shape.
*
* Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically.
* Valid values are:
*
* 0 == ONE_GOB
* 1 == TWO_GOBS
* 2 == FOUR_GOBS
* 3 == EIGHT_GOBS
* 4 == SIXTEEN_GOBS
* 5 == THIRTYTWO_GOBS
*
* Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
* in full detail.
*/
#define NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(v) fourcc_mod_tegra_code(2, v)
#if defined(__cplusplus)
}
#endif
......
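The new Tegra modifiers pack a mode in the upper half of the 56-bit value and a parameter (here the log2 GOB count) in the lower 32 bits. A small user-space sketch that encodes and decodes a 16Bx2 block-linear modifier with v = 4 (SIXTEEN_GOBS); the fourcc_mod_code() helper and the NVIDIA vendor code 0x03 are copied locally from drm_fourcc.h so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

/* Minimal local copies of the drm_fourcc.h helpers used by the new macros. */
#define DRM_FORMAT_MOD_VENDOR_NV 0x03
#define fourcc_mod_code(vendor, val) \
	((((uint64_t)DRM_FORMAT_MOD_VENDOR_##vendor) << 56) | ((val) & 0x00ffffffffffffffULL))

#define __fourcc_mod_tegra_mode_shift 32
#define fourcc_mod_tegra_code(val, params) \
	fourcc_mod_code(NV, ((((uint64_t)val) << __fourcc_mod_tegra_mode_shift) | params))
#define fourcc_mod_tegra_mod(m) \
	(m & ~((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
#define fourcc_mod_tegra_param(m) \
	(m & ((1ULL << __fourcc_mod_tegra_mode_shift) - 1))

#define NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(v) fourcc_mod_tegra_code(2, v)

int main(void)
{
	/* Block-linear layout with 2^4 = 16 GOBs stacked vertically. */
	uint64_t mod = NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(4);

	printf("modifier: %#llx\n", (unsigned long long)mod);
	printf("mode bits: %#llx, parameter: %llu\n",
	       (unsigned long long)fourcc_mod_tegra_mod(mod),
	       (unsigned long long)fourcc_mod_tegra_param(mod));
	return 0;
}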