Commit 6606e9af authored by Dave Airlie

Merge branch 'msm-fixes-4.13-rc3' of git://people.freedesktop.org/~robclark/linux into drm-fixes

Bunch of msm fixes for 4.13

* 'msm-fixes-4.13-rc3' of git://people.freedesktop.org/~robclark/linux:
  drm/msm: gpu: don't abuse dma_alloc for non-DMA allocations
  drm/msm: gpu: call qcom_mdt interfaces only for ARCH_QCOM
  drm/msm/adreno: Prevent unclocked access when retrieving timestamps
  drm/msm: Remove __user from __u64 data types
  drm/msm: args->fence should be args->flags
  drm/msm: Turn off hardware clock gating before reading A5XX registers
  drm/msm: Allow hardware clock gating to be toggled
  drm/msm: Remove some potentially blocked register ranges
  drm/msm/mdp5: Drop clock names with "_clk" suffix
  drm/msm/mdp5: Fix typo in encoder_enable path
  drm/msm: NULL pointer dereference in drivers/gpu/drm/msm/msm_gem_vma.c
  drm/msm: fix WARN_ON in add_vma() with no iommu
  drm/msm/dsi: Calculate link clock rates with updated dsi->lanes
  drm/msm/mdp5: fix unclocked register access in _cursor_set()
  drm/msm: unlock on error in msm_gem_get_iova()
  drm/msm: fix an integer overflow test
  drm/msm/mdp5: Fix compilation warnings
parents aae4e7a8 8f93e043
@@ -5,7 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
-select QCOM_MDT_LOADER
+select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
@@ -15,7 +15,7 @@
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
const struct firmware *fw;
+struct device_node *np;
+struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
void *mem_region = NULL;
int ret;
+if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+return -EINVAL;
+np = of_get_child_by_name(dev->of_node, "zap-shader");
+if (!np)
+return -ENODEV;
+np = of_parse_phandle(np, "memory-region", 0);
+if (!np)
+return -EINVAL;
+ret = of_address_to_resource(np, 0, &r);
+if (ret)
+return ret;
+mem_phys = r.start;
+mem_size = resource_size(&r);
/* Request the MDT file for the firmware */
ret = request_firmware(&fw, fwname, dev);
if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
}
/* Allocate memory for the firmware image */
-mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {
ret = -ENOMEM;
goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
out:
+if (mem_region)
+memunmap(mem_region);
release_firmware(fw);
return ret;
}
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-return -ENODEV;
-}
-#endif
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
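
Why the allocation change in zap_shader_load_mdt() matters: dmam_alloc_coherent() hands back freshly allocated DMA memory plus a dma_addr_t, but the zap shader has to land in the specific physical carveout that the secure world authenticates, so the old code was abusing the DMA API (and treating a dma_addr_t as a physical address). The adopted pattern, condensed into one hypothetical helper (error handling trimmed; zap_map_carveout() is illustrative, not a function in the driver):

    #include <linux/io.h>          /* memremap(), memunmap() */
    #include <linux/of.h>          /* of_get_child_by_name(), of_parse_phandle() */
    #include <linux/of_address.h>  /* of_address_to_resource() */

    /* Resolve the zap-shader reserved-memory region and map it write-combined. */
    static void *zap_map_carveout(struct device *dev, phys_addr_t *phys, ssize_t *size)
    {
            struct device_node *np;
            struct resource r;

            np = of_get_child_by_name(dev->of_node, "zap-shader");
            if (np)
                    np = of_parse_phandle(np, "memory-region", 0);
            if (!np || of_address_to_resource(np, 0, &r))
                    return NULL;

            *phys = r.start;
            *size = resource_size(&r);

            /* Map the existing range; the cleanup pair is memunmap(), not a free. */
            return memremap(*phys, *size, MEMREMAP_WC);
    }
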
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
gpu->funcs->flush(gpu);
}
-struct a5xx_hwcg {
+static const struct {
u32 offset;
u32 value;
-};
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
-static const struct {
-int (*test)(struct adreno_gpu *gpu);
-const struct a5xx_hwcg *regs;
-unsigned int count;
-} a5xx_hwcg_regs[] = {
-{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
unsigned int i;
-for (i = 0; i < count; i++)
-gpu_write(gpu, regs[i].offset, regs[i].value);
+for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+gpu_write(gpu, a5xx_hwcg[i].offset,
+state ? a5xx_hwcg[i].value : 0);
-gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
}
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-unsigned int i;
-for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-a5xx_hwcg_regs[i].count);
-return;
-}
-}
+gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
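
The new a5xx_set_hwcg() turns the enable-only helper into an explicit on/off toggle so callers can pause hardware clock gating around register reads that misbehave while gating is active. A minimal caller sketch (a5xx_read_ungated() is hypothetical; the in-tree user is the a5xx_show() hunk further down):

    /* Sample a register with HW clock gating temporarily paused. */
    static u32 a5xx_read_ungated(struct msm_gpu *gpu, u32 reg)
    {
            u32 val;

            a5xx_set_hwcg(gpu, false);      /* stop gating so the read is stable */
            val = gpu_read(gpu, reg);
            a5xx_set_hwcg(gpu, true);       /* restore the power-saving default */

            return val;
    }
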
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
return ret;
}
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-struct device_node *node;
-int ret;
-if (dev->parent)
-return 0;
-/* Find the sub-node for the zap shader */
-node = of_get_child_by_name(parent->of_node, "zap-shader");
-if (!node) {
-DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-return -ENODEV;
-}
-dev->parent = parent;
-dev->of_node = node;
-dev_set_name(dev, "adreno_zap_shader");
-ret = device_register(dev);
-if (ret) {
-DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-goto out;
-}
-ret = of_reserved_mem_device_init(dev);
-if (ret) {
-DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-device_unregister(dev);
-}
-out:
-if (ret)
-dev->parent = NULL;
-return ret;
-}
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
-ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-if (!ret)
-ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-adreno_gpu->info->zapfw);
+ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
loaded = !ret;
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/* Enable HWCG */
-a5xx_enable_hwcg(gpu);
+a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
-if (a5xx_gpu->zap_dev.parent)
-device_unregister(&a5xx_gpu->zap_dev);
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-~0
+0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+0xB9A0, 0xB9BF, ~0
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+/*
+ * Temporarily disable hardware clock gating before going into
+ * adreno_show to avoid issues while reading the registers
+ */
+a5xx_set_hwcg(gpu, false);
adreno_show(gpu, m);
+a5xx_set_hwcg(gpu, true);
}
#endif
@@ -36,8 +36,6 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
-struct device zap_dev;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
}
bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
#endif /* __A5XX_GPU_H__ */
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->base.fast_rate;
return 0;
case MSM_PARAM_TIMESTAMP:
-if (adreno_gpu->funcs->get_timestamp)
-return adreno_gpu->funcs->get_timestamp(gpu, value);
+if (adreno_gpu->funcs->get_timestamp) {
+int ret;
+pm_runtime_get_sync(&gpu->pdev->dev);
+ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+pm_runtime_put_autosuspend(&gpu->pdev->dev);
+return ret;
+}
return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+int ret;
+ret = dsi_calc_clk_rate(msm_host);
+if (ret) {
+pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+return;
+}
clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-int ret;
if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return -ENOMEM;
}
-ret = dsi_calc_clk_rate(msm_host);
-if (ret) {
-pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-return ret;
-}
return 0;
}
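
Moving dsi_calc_clk_rate() out of msm_dsi_host_set_display_mode() and into msm_dsi_host_get_phy_clk_req() means the rates are computed after dsi->lanes has been updated from the attached device, so the per-lane math sees the real lane count. The underlying arithmetic, roughly (a simplified sketch; the driver also handles dual-DSI and rounding):

    #include <linux/math64.h>

    /* byte clock: pixel bandwidth spread across the lanes, 8 bits per byte */
    static u32 dsi_byte_clk_rate(u32 pclk_rate, int bpp, int lanes)
    {
            return (u32)div_u64((u64)pclk_rate * bpp, lanes * 8);
    }
    /* The PHY bit clock is then byte_clk * 8, matching the hunk above. */
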
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
-enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+mdp5_enable(mdp5_kms);
goto set_cursor;
}
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
get_roi(crtc, &roi_w, &roi_h);
+mdp5_enable(mdp5_kms);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
crtc_flush(crtc, flush_mask);
end:
+mdp5_disable(mdp5_kms);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
get_roi(crtc, &roi_w, &roi_h);
+mdp5_enable(mdp5_kms);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
crtc_flush(crtc, flush_mask);
+mdp5_disable(mdp5_kms);
return 0;
}
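
All four cursor hunks enforce the same invariant: MDP5 register accesses must sit inside an mdp5_enable()/mdp5_disable() bracket so the interface clocks are running, including the early cursor-off path that previously jumped to set_cursor without enabling. The shape of the fix in isolation (a sketch; mdp5_cursor_write() is hypothetical):

    static void mdp5_cursor_write(struct mdp5_kms *mdp5_kms, int lm, u32 stride)
    {
            mdp5_enable(mdp5_kms);  /* clock the block before touching it */
            mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
            mdp5_disable(mdp5_kms); /* balanced disable on every exit path */
    }
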
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-mdp5_cmd_encoder_disable(encoder);
+mdp5_cmd_encoder_enable(encoder);
else
mdp5_vid_encoder_enable(encoder);
}
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name, bool mandatory)
{
struct device *dev = &pdev->dev;
-struct clk *clk = devm_clk_get(dev, name);
+struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
}
/* mandatory clocks: */
-ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
goto fail;
-ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
goto fail;
-ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
goto fail;
-ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
goto fail;
/* optional clocks: */
-get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
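
Dropping the "_clk" suffix does not break existing device trees because msm_clk_get() retries with the legacy name. A sketch of that fallback from memory (see msm_drv.c for the authoritative version):

    struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, name);
            char name2[32];

            if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                    return clk;     /* new-style name found, or still probing */

            /* fall back to the legacy "<name>_clk" binding */
            snprintf(name2, sizeof(name2), "%s_clk", name);
            clk = devm_clk_get(&pdev->dev, name2);
            if (!IS_ERR(clk))
                    dev_warn(&pdev->dev, "Using legacy clk name binding: %s\n", name2);

            return clk;
    }
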
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
-struct phase_step step = { 0 };
-struct pixel_ext pe = { 0 };
+struct phase_step step = { { 0 } };
+struct pixel_ext pe = { { 0 } };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
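
The doubled braces silence GCC's -Wmissing-braces: the initialized objects are nested (a struct whose first member is an array, or a two-dimensional array like the stage arrays earlier), so the initializer needs one brace level per nesting level. Minimal illustration with a hypothetical struct:

    #include <linux/types.h>

    struct phase_step_like {
            u32 x[2];       /* first member is itself an array */
            u32 y[2];
    };

    static struct phase_step_like a = { { 0 } };    /* braces match the nesting */
    static struct phase_step_like b = { 0 };        /* same zero-init, but warns */
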
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
struct page **pages;
vma = add_vma(obj, aspace);
-if (IS_ERR(vma))
-return PTR_ERR(vma);
+if (IS_ERR(vma)) {
+ret = PTR_ERR(vma);
+goto unlock;
+}
pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
fail:
del_vma(vma);
+unlock:
mutex_unlock(&msm_obj->lock);
return ret;
}
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
+struct msm_gem_object *msm_obj = to_msm_bo(obj);
+mutex_lock(&msm_obj->lock);
vma = add_vma(obj, NULL);
+mutex_unlock(&msm_obj->lock);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
-uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-(nr_cmds * sizeof(submit->cmd[0]));
+uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
-if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
goto out;
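
The second hunk is a plain field mix-up: MSM_SUBMIT_NO_IMPLICIT is a bit in flags, while fence is an out-parameter, so testing it against fence was always wrong. The first hunk fixes the overflow test itself: nr_bos and nr_cmds are u32, and on a 32-bit kernel size_t is also 32 bits, so each product was computed in 32-bit arithmetic and could wrap before being widened into the u64 sz, slipping under the SIZE_MAX check. Worked example with a hypothetical 32-byte element size:

    static void overflow_demo(void)
    {
            u32 nr_bos = 0x08000002;
            u64 bad  = nr_bos * 32u;        /* 32-bit multiply: 0x100000040 wraps to 0x40 */
            u64 good = (u64)nr_bos * 32u;   /* widened first: 0x100000040, caught by the check */

            (void)bad;
            (void)good;
    }
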
@@ -42,7 +42,7 @@ void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
{
-if (!vma->iova)
+if (!aspace || !vma->iova)
return;
if (aspace->mmu) {
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
-__u64 __user relocs; /* in, ptr to array of submit_reloc's */
+__u64 relocs; /* in, ptr to array of submit_reloc's */
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
-__u64 __user bos; /* in, ptr to array of submit_bo's */
-__u64 __user cmds; /* in, ptr to array of submit_cmd's */
+__u64 bos; /* in, ptr to array of submit_bo's */
+__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
};
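
Dropping __user here is a sparse-correctness fix: __user annotates pointer types, and these fields are plain 64-bit integers that happen to carry user addresses. The kernel converts them to a __user pointer only at the copy boundary; a sketch of typical consumption (copy_submit_cmd() is a hypothetical helper, not the exact driver code):

    #include <linux/kernel.h>       /* u64_to_user_ptr() */
    #include <linux/uaccess.h>      /* copy_from_user() */

    static int copy_submit_cmd(struct drm_msm_gem_submit *args, u32 i,
                    struct drm_msm_gem_submit_cmd *out)
    {
            u64 addr = args->cmds + (u64)i * sizeof(*out);

            if (copy_from_user(out, u64_to_user_ptr(addr), sizeof(*out)))
                    return -EFAULT;

            return 0;
    }
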