Commit 43a70661 authored by Ben Skeggs

drm/nouveau/tegra: merge platform setup from nouveau drm

The copyright header in nvkm/engine/device/platform.c has been replaced
with the NVIDIA one from drm/nouveau_platform.c, as most of the actual
code is now theirs.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 340b0e7c
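
In practice the change moves the Tegra power, clock, reset and IOMMU setup out of drm/nouveau_platform.c and into the nvkm Tegra device code, so subdevs now look the platform state up through the device function table instead of a device->gpu pointer. Below is a minimal sketch of the new access pattern, not part of the patch: example_read_gpu_clk() is a hypothetical helper, and only the tegra() lookup and the nvkm_device_tegra fields are taken from the hunks that follow.

/*
 * Illustrative sketch only.  Before this commit a subdev would have read
 * clk_get_rate(device->gpu->clk) via struct nouveau_platform_gpu; after it,
 * the Tegra container is obtained from the device function table.
 */
#include <linux/clk.h>
#include <core/tegra.h>

static unsigned long
example_read_gpu_clk(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);

	/* tdev->clk is the "gpu" clock requested in nvkm_device_tegra_new() */
	return clk_get_rate(tdev->clk);
}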
@@ -25,10 +25,13 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/agp_backend.h>
#include <linux/reset.h>
#include <linux/iommu.h>
#include <asm/unaligned.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
......
@@ -139,8 +139,6 @@ struct nvkm_device {
struct nvkm_sw *sw;
struct nvkm_engine *vic;
struct nvkm_engine *vp;
struct nouveau_platform_gpu *gpu;
};
struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
......
#ifndef __NVKM_DEVICE_TEGRA_H__
#define __NVKM_DEVICE_TEGRA_H__
#include <core/device.h>
#include <core/mm.h>
struct nvkm_device_tegra {
struct nvkm_device device;
struct platform_device *pdev;
int irq;
struct reset_control *rst;
struct clk *clk;
struct clk *clk_pwr;
struct regulator *vdd;
struct {
/*
* Protects accesses to mm from subsystems
*/
struct mutex mutex;
struct nvkm_mm mm;
struct iommu_domain *domain;
unsigned long pgshift;
} iommu;
int gpu_speedo;
};
int nvkm_device_tegra_new(struct platform_device *,
......
@@ -19,234 +19,32 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
#include <linux/iommu.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
#include "nouveau_drm.h"
#include "nouveau_platform.h"
static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
{
int err;
err = regulator_enable(gpu->vdd);
if (err)
goto err_power;
err = clk_prepare_enable(gpu->clk);
if (err)
goto err_clk;
err = clk_prepare_enable(gpu->clk_pwr);
if (err)
goto err_clk_pwr;
clk_set_rate(gpu->clk_pwr, 204000000);
udelay(10);
reset_control_assert(gpu->rst);
udelay(10);
err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
if (err)
goto err_clamp;
udelay(10);
reset_control_deassert(gpu->rst);
udelay(10);
return 0;
err_clamp:
clk_disable_unprepare(gpu->clk_pwr);
err_clk_pwr:
clk_disable_unprepare(gpu->clk);
err_clk:
regulator_disable(gpu->vdd);
err_power:
return err;
}
static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
{
int err;
reset_control_assert(gpu->rst);
udelay(10);
clk_disable_unprepare(gpu->clk_pwr);
clk_disable_unprepare(gpu->clk);
udelay(10);
err = regulator_disable(gpu->vdd);
if (err)
return err;
return 0;
}
#if IS_ENABLED(CONFIG_IOMMU_API)
static void nouveau_platform_probe_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
int err;
unsigned long pgsize_bitmap;
mutex_init(&gpu->iommu.mutex);
if (iommu_present(&platform_bus_type)) {
gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (IS_ERR(gpu->iommu.domain))
goto error;
/*
 * An IOMMU is only usable if it supports page sizes smaller than
 * or equal to the system's PAGE_SIZE, with a preference if
 * both are equal.
 */
pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
if (pgsize_bitmap & PAGE_SIZE) {
gpu->iommu.pgshift = PAGE_SHIFT;
} else {
gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
if (gpu->iommu.pgshift == 0) {
dev_warn(dev, "unsupported IOMMU page size\n");
goto free_domain;
}
gpu->iommu.pgshift -= 1;
}
err = iommu_attach_device(gpu->iommu.domain, dev);
if (err)
goto free_domain;
err = nvkm_mm_init(&gpu->iommu._mm, 0,
(1ULL << 40) >> gpu->iommu.pgshift, 1);
if (err)
goto detach_device;
gpu->iommu.mm = &gpu->iommu._mm;
}
return;
detach_device:
iommu_detach_device(gpu->iommu.domain, dev);
free_domain:
iommu_domain_free(gpu->iommu.domain);
error:
gpu->iommu.domain = NULL;
gpu->iommu.pgshift = 0;
dev_err(dev, "cannot initialize IOMMU MM\n");
}
static void nouveau_platform_remove_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
if (gpu->iommu.domain) {
nvkm_mm_fini(&gpu->iommu._mm);
iommu_detach_device(gpu->iommu.domain, dev);
iommu_domain_free(gpu->iommu.domain);
}
}
#else
static void nouveau_platform_probe_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
}
static void nouveau_platform_remove_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
}
#endif
static int nouveau_platform_probe(struct platform_device *pdev)
{
struct nouveau_platform_gpu *gpu;
struct nvkm_device *device;
struct drm_device *drm;
int err;
gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
if (!gpu)
return -ENOMEM;
gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(gpu->vdd))
return PTR_ERR(gpu->vdd);
gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
if (IS_ERR(gpu->rst))
return PTR_ERR(gpu->rst);
gpu->clk = devm_clk_get(&pdev->dev, "gpu");
if (IS_ERR(gpu->clk))
return PTR_ERR(gpu->clk);
gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
if (IS_ERR(gpu->clk_pwr))
return PTR_ERR(gpu->clk_pwr);
nouveau_platform_probe_iommu(&pdev->dev, gpu);
err = nouveau_platform_power_up(gpu);
if (err)
return err;
int ret;
drm = nouveau_platform_device_create(pdev, &device);
if (IS_ERR(drm)) {
err = PTR_ERR(drm);
goto power_down;
}
if (IS_ERR(drm))
return PTR_ERR(drm);
device->gpu = gpu;
gpu->gpu_speedo = tegra_sku_info.gpu_speedo_value;
err = drm_dev_register(drm, 0);
if (err < 0)
goto err_unref;
ret = drm_dev_register(drm, 0);
if (ret < 0) {
drm_dev_unref(drm);
return ret;
}
return 0;
err_unref:
drm_dev_unref(drm);
power_down:
nouveau_platform_power_down(gpu);
nouveau_platform_remove_iommu(&pdev->dev, gpu);
return err;
}
static int nouveau_platform_remove(struct platform_device *pdev)
{
struct drm_device *drm_dev = platform_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_device *device = nvxx_device(&drm->device);
struct nouveau_platform_gpu *gpu = device->gpu;
int err;
nouveau_drm_device_remove(drm_dev);
err = nouveau_platform_power_down(gpu);
nouveau_platform_remove_iommu(&pdev->dev, gpu);
return err;
struct drm_device *dev = platform_get_drvdata(pdev);
nouveau_drm_device_remove(dev);
return 0;
}
#if IS_ENABLED(CONFIG_OF)
......
@@ -19,44 +19,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NOUVEAU_PLATFORM_H__
#define __NOUVEAU_PLATFORM_H__
#include "core/device.h"
#include "core/mm.h"
struct reset_control;
struct clk;
struct regulator;
struct iommu_domain;
struct platform_driver;
struct nouveau_platform_gpu {
struct reset_control *rst;
struct clk *clk;
struct clk *clk_pwr;
struct regulator *vdd;
struct {
/*
* Protects accesses to mm from subsystems
*/
struct mutex mutex;
struct nvkm_mm _mm;
/*
* Just points to _mm. We need this to avoid embedding
* struct nvkm_mm in os.h
*/
struct nvkm_mm *mm;
struct iommu_domain *domain;
unsigned long pgshift;
} iommu;
int gpu_speedo;
};
#include "nouveau_drm.h"
extern struct platform_driver nouveau_platform_driver;
#endif
/*
* Copyright 2015 Red Hat Inc.
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -14,17 +14,138 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
int ret;
ret = regulator_enable(tdev->vdd);
if (ret)
goto err_power;
ret = clk_prepare_enable(tdev->clk);
if (ret)
goto err_clk;
ret = clk_prepare_enable(tdev->clk_pwr);
if (ret)
goto err_clk_pwr;
clk_set_rate(tdev->clk_pwr, 204000000);
udelay(10);
reset_control_assert(tdev->rst);
udelay(10);
ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
if (ret)
goto err_clamp;
udelay(10);
reset_control_deassert(tdev->rst);
udelay(10);
return 0;
err_clamp:
clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
clk_disable_unprepare(tdev->clk);
err_clk:
regulator_disable(tdev->vdd);
err_power:
return ret;
}
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
reset_control_assert(tdev->rst);
udelay(10);
clk_disable_unprepare(tdev->clk_pwr);
clk_disable_unprepare(tdev->clk);
udelay(10);
return regulator_disable(tdev->vdd);
}
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
struct device *dev = &tdev->pdev->dev;
unsigned long pgsize_bitmap;
int ret;
mutex_init(&tdev->iommu.mutex);
if (iommu_present(&platform_bus_type)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (IS_ERR(tdev->iommu.domain))
goto error;
/*
 * An IOMMU is only usable if it supports page sizes smaller than
 * or equal to the system's PAGE_SIZE, with a preference if
 * both are equal.
 */
pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
if (pgsize_bitmap & PAGE_SIZE) {
tdev->iommu.pgshift = PAGE_SHIFT;
} else {
tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
if (tdev->iommu.pgshift == 0) {
dev_warn(dev, "unsupported IOMMU page size\n");
goto free_domain;
}
tdev->iommu.pgshift -= 1;
}
ret = iommu_attach_device(tdev->iommu.domain, dev);
if (ret)
goto free_domain;
ret = nvkm_mm_init(&tdev->iommu.mm, 0,
(1ULL << 40) >> tdev->iommu.pgshift, 1);
if (ret)
goto detach_device;
}
return;
detach_device:
iommu_detach_device(tdev->iommu.domain, dev);
free_domain:
iommu_domain_free(tdev->iommu.domain);
error:
tdev->iommu.domain = NULL;
tdev->iommu.pgshift = 0;
dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
if (tdev->iommu.domain) {
nvkm_mm_fini(&tdev->iommu.mm);
iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
iommu_domain_free(tdev->iommu.domain);
}
#endif
}
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
@@ -95,9 +216,19 @@ nvkm_device_tegra_init(struct nvkm_device *device)
return 0;
}
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
nvkm_device_tegra_power_down(tdev);
nvkm_device_tegra_remove_iommu(tdev);
return tdev;
}
static const struct nvkm_device_func
nvkm_device_tegra_func = {
.tegra = nvkm_device_tegra,
.dtor = nvkm_device_tegra_dtor,
.init = nvkm_device_tegra_init,
.fini = nvkm_device_tegra_fini,
.resource_addr = nvkm_device_tegra_resource_addr,
@@ -112,6 +243,7 @@ nvkm_device_tegra_new(struct platform_device *pdev,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
int ret;
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
return -ENOMEM;
@@ -119,10 +251,37 @@ nvkm_device_tegra_new(struct platform_device *pdev,
tdev->pdev = pdev;
tdev->irq = -1;
return nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd))
return PTR_ERR(tdev->vdd);
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
if (IS_ERR(tdev->rst))
return PTR_ERR(tdev->rst);
tdev->clk = devm_clk_get(&pdev->dev, "gpu");
if (IS_ERR(tdev->clk))
return PTR_ERR(tdev->clk);
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
if (IS_ERR(tdev->clk_pwr))
return PTR_ERR(tdev->clk_pwr);
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
return ret;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
if (ret)
return ret;
return 0;
}
#else
int
......
@@ -25,12 +25,9 @@
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
#include "priv.h"
#include <core/tegra.h>
#include <subdev/timer.h>
#ifdef __KERNEL__
#include <nouveau_platform.h>
#endif
#define MHZ (1000 * 1000)
#define MASK(w) ((1 << w) - 1)
@@ -649,6 +646,7 @@ gk20a_clk = {
int
gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_clk *clk;
int ret, i;
@@ -663,7 +661,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
}
clk->params = &gk20a_pllg_params;
clk->parent_rate = clk_get_rate(device->gpu->clk);
clk->parent_rate = clk_get_rate(tdev->clk);
ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
nvkm_info(&clk->base.subdev, "parent clock rate: %d MHz\n",
......
@@ -42,14 +42,9 @@
#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#ifdef __KERNEL__
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <nouveau_platform.h>
#endif
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
struct gk20a_instobj {
@@ -423,8 +418,9 @@ gk20a_instmem = {
int
gk20a_instmem_new(struct nvkm_device *device, int index,
struct nvkm_instmem **pimem)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
@@ -433,11 +429,11 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
spin_lock_init(&imem->lock);
*pimem = &imem->base;
if (device->gpu->iommu.domain) {
imem->domain = device->gpu->iommu.domain;
imem->mm = device->gpu->iommu.mm;
imem->iommu_pgshift = device->gpu->iommu.pgshift;
imem->mm_mutex = &device->gpu->iommu.mutex;
if (tdev->iommu.domain) {
imem->domain = tdev->iommu.domain;
imem->mm = &tdev->iommu.mm;
imem->iommu_pgshift = tdev->iommu.pgshift;
imem->mm_mutex = &tdev->iommu.mutex;
nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
......
@@ -22,9 +22,7 @@
#define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
#include "priv.h"
#ifdef __KERNEL__
#include <nouveau_platform.h>
#endif
#include <core/tegra.h>
struct cvb_coef {
int c0;
@@ -159,6 +157,7 @@ gk20a_volt = {
int
gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_volt *volt;
int i, uv;
@@ -168,10 +167,10 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
*pvolt = &volt->base;
uv = regulator_get_voltage(device->gpu->vdd);
uv = regulator_get_voltage(tdev->vdd);
nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
volt->vdd = device->gpu->vdd;
volt->vdd = tdev->vdd;
volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
@@ -180,7 +179,7 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
volt->base.vid[i].vid = i;
volt->base.vid[i].uv =
gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
device->gpu->gpu_speedo);
tdev->gpu_speedo);
nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
volt->base.vid[i].vid, volt->base.vid[i].uv);
}
......