Commit 2c3d68ab authored by Dave Airlie

Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Mostly cleanups, fixes, and 'struct fence' conversion this time
around, with one reservation patch which is a-b Sumit (which the fence
conversion patches depend on).

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (25 commits)
  drm/msm: Drop load/unload drm_driver ops
  drm/msm: Centralize connector registration/unregistration
  drm/msm/hdmi: Prevent gpio_free related kernel warnings
  drm/msm: print offender task name on hangcheck recovery
  drm/msm: fix leak in failed submit path
  drm/msm: de-indent submit_create()
  drm/msm: drop return from gpu->submit()
  drm/msm/mdp4: Don't manage DSI PLL regulators in MDP driver
  drm/msm/edp: Drop regulator_set_voltage call
  drm/msm/dsi: Fix regulator API abuse
  drm/msm: Move call to PTR_ERR_OR_ZERO after reassignment
  drm/msm/mdp: Add support for more RGBX formats
  drm: msm: remove unused variable
  drm/msm: fix ->last_fence() after recover
  drm/msm: 'struct fence' conversion
  drm/msm: remove fence_cbs
  drm/msm: introduce msm_fence_context
  drm/msm: split locking and pinning BO's
  drm/msm/gpu: simplify tracking in-flight bo's
  drm/msm: split out timeout_to_jiffies helper
  ...
parents 2958cf0e 2b669875
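The 'struct fence' conversion in the shortlog replaces the driver-private seqno tracking (priv->next_fence / completed_fence and msm_fence_cb) with a per-ring msm_fence_context plus per-submit struct fence objects, so buffer synchronization goes through the shared reservation_object machinery. A minimal sketch of how a caller waits on a legacy seqno with the new API, using only helpers from the msm_fence.h / msm_fence.c listings further down (the function name and timeout value here are illustrative, not from the patches):

/* Illustrative only: wait up to 'timeout_ms' for a seqno on one fence context. */
static int example_wait_seqno(struct msm_fence_context *fctx,
		uint32_t seqno, unsigned int timeout_ms)
{
	/* absolute deadline; msm_wait_fence() converts it via timeout_to_jiffies() */
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_ms);

	return msm_wait_fence(fctx, seqno, &timeout, true);
}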
@@ -23,6 +23,13 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_HDMI_HDCP
+bool "Enable HDMI HDCP support in MSM DRM driver"
+depends on DRM_MSM && QCOM_SCM
+default y
+help
+Choose this option to enable HDCP state machine
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
...
@@ -10,7 +10,6 @@ msm-y := \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
hdmi/hdmi_connector.o \
-hdmi/hdmi_hdcp.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
@@ -40,8 +39,10 @@ msm-y := \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
+msm_debugfs.o \
msm_drv.o \
msm_fb.o \
+msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
@@ -56,6 +57,8 @@ msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
+msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
mdp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
...
@@ -120,8 +120,8 @@ void adreno_recover(struct msm_gpu *gpu)
/* reset ringbuffer: */
gpu->rb->cur = gpu->rb->start;
-/* reset completed fence seqno, just discard anything pending: */
-adreno_gpu->memptrs->fence = gpu->submitted_fence;
+/* reset completed fence seqno: */
+adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
adreno_gpu->memptrs->rptr = 0;
adreno_gpu->memptrs->wptr = 0;
@@ -133,7 +133,7 @@ void adreno_recover(struct msm_gpu *gpu)
}
}
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -168,7 +168,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT2(ring);
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
-OUT_RING(ring, submit->fence);
+OUT_RING(ring, submit->fence->seqno);
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
@@ -185,7 +185,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, rbmemptr(adreno_gpu, fence));
-OUT_RING(ring, submit->fence);
+OUT_RING(ring, submit->fence->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
OUT_PKT3(ring, CP_INTERRUPT, 1);
@@ -212,8 +212,6 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
#endif
gpu->funcs->flush(gpu);
-
-return 0;
}
void adreno_flush(struct msm_gpu *gpu)
@@ -254,7 +252,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.patchid);
seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
-gpu->submitted_fence);
+gpu->fctx->last_fence);
seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
@@ -295,7 +293,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
adreno_gpu->rev.patchid);
printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
-gpu->submitted_fence);
+gpu->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
printk("rb wptr: %d\n", get_wptr(gpu->rb));
...
@@ -238,7 +238,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu);
void adreno_idle(struct msm_gpu *gpu);
...
@@ -41,8 +41,6 @@ enum msm_dsi_phy_type {
/* Regulators for DSI devices */
struct dsi_reg_entry {
char name[32];
-int min_voltage;
-int max_voltage;
int enable_load;
int disable_load;
};
...
@@ -22,9 +22,9 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
.reg_cfg = {
.num = 3,
.regs = {
-{"vdda", 1200000, 1200000, 100000, 100},
-{"avdd", 3000000, 3000000, 110000, 100},
-{"vddio", 1800000, 1800000, 100000, 100},
+{"vdda", 100000, 100},	/* 1.2 V */
+{"avdd", 10000, 100},	/* 3.0 V */
+{"vddio", 100000, 100},	/* 1.8 V */
},
},
.bus_clk_names = dsi_v2_bus_clk_names,
@@ -40,10 +40,10 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.reg_cfg = {
.num = 4,
.regs = {
-{"gdsc", -1, -1, -1, -1},
-{"vdd", 3000000, 3000000, 150000, 100},
-{"vdda", 1200000, 1200000, 100000, 100},
-{"vddio", 1800000, 1800000, 100000, 100},
+{"gdsc", -1, -1},
+{"vdd", 150000, 100},	/* 3.0 V */
+{"vdda", 100000, 100},	/* 1.2 V */
+{"vddio", 100000, 100},	/* 1.8 V */
},
},
.bus_clk_names = dsi_6g_bus_clk_names,
@@ -59,9 +59,9 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
.reg_cfg = {
.num = 3,
.regs = {
-{"gdsc", -1, -1, -1, -1},
-{"vdda", 1200000, 1200000, 100000, 100},
-{"vddio", 1800000, 1800000, 100000, 100},
+{"gdsc", -1, -1},
+{"vdda", 100000, 100},	/* 1.2 V */
+{"vddio", 100000, 100},	/* 1.8 V */
},
},
.bus_clk_names = dsi_8916_bus_clk_names,
@@ -73,13 +73,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
.reg_cfg = {
.num = 7,
.regs = {
-{"gdsc", -1, -1, -1, -1},
-{"vdda", 1250000, 1250000, 100000, 100},
-{"vddio", 1800000, 1800000, 100000, 100},
-{"vcca", 1000000, 1000000, 10000, 100},
-{"vdd", 1800000, 1800000, 100000, 100},
-{"lab_reg", -1, -1, -1, -1},
-{"ibb_reg", -1, -1, -1, -1},
+{"gdsc", -1, -1},
+{"vdda", 100000, 100},	/* 1.25 V */
+{"vddio", 100000, 100},	/* 1.8 V */
+{"vcca", 10000, 100},	/* 1.0 V */
+{"vdd", 100000, 100},	/* 1.8 V */
+{"lab_reg", -1, -1},
+{"ibb_reg", -1, -1},
},
},
.bus_clk_names = dsi_6g_bus_clk_names,
...
@@ -325,18 +325,6 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
return ret;
}
-for (i = 0; i < num; i++) {
-if (regulator_can_change_voltage(s[i].consumer)) {
-ret = regulator_set_voltage(s[i].consumer,
-regs[i].min_voltage, regs[i].max_voltage);
-if (ret < 0) {
-pr_err("regulator %d set voltage failed, %d\n",
-i, ret);
-return ret;
-}
-}
-}
-
return 0;
}
...
@@ -198,9 +198,13 @@ static enum drm_connector_status dsi_mgr_connector_detect(
static void dsi_mgr_connector_destroy(struct drm_connector *connector)
{
+struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+
DBG("");
-drm_connector_unregister(connector);
drm_connector_cleanup(connector);
+
+kfree(dsi_connector);
}
static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
@@ -538,12 +542,9 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
struct dsi_connector *dsi_connector;
int ret, i;
-dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
-sizeof(*dsi_connector), GFP_KERNEL);
-if (!dsi_connector) {
-ret = -ENOMEM;
-goto fail;
-}
+dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
+if (!dsi_connector)
+return ERR_PTR(-ENOMEM);
dsi_connector->id = id;
@@ -552,7 +553,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
ret = drm_connector_init(msm_dsi->dev, connector,
&dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
if (ret)
-goto fail;
+return ERR_PTR(ret);
drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
@@ -565,21 +566,11 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
-ret = drm_connector_register(connector);
-if (ret)
-goto fail;
-
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
drm_mode_connector_attach_encoder(connector,
msm_dsi->encoders[i]);
return connector;
-
-fail:
-if (connector)
-dsi_mgr_connector_destroy(connector);
-
-return ERR_PTR(ret);
}
/* initialize bridge */
...
@@ -177,19 +177,6 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
return ret;
}
-for (i = 0; i < num; i++) {
-if (regulator_can_change_voltage(s[i].consumer)) {
-ret = regulator_set_voltage(s[i].consumer,
-regs[i].min_voltage, regs[i].max_voltage);
-if (ret < 0) {
-dev_err(dev,
-"regulator %d set voltage failed, %d\n",
-i, ret);
-return ret;
-}
-}
-}
-
return 0;
}
...
@@ -138,8 +138,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.reg_cfg = {
.num = 2,
.regs = {
-{"vddio", 1800000, 1800000, 100000, 100},
-{"vcca", 1000000, 1000000, 10000, 100},
+{"vddio", 100000, 100},	/* 1.8 V */
+{"vcca", 10000, 100},	/* 1.0 V */
},
},
.ops = {
...
@@ -138,7 +138,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
-{"vddio", 1800000, 1800000, 100000, 100},
+{"vddio", 100000, 100},
},
},
.ops = {
@@ -153,7 +153,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
-{"vddio", 1800000, 1800000, 100000, 100},
+{"vddio", 100000, 100},	/* 1.8 V */
},
},
.ops = {
...
@@ -185,7 +185,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
-{"vddio", 1800000, 1800000, 100000, 100},
+{"vddio", 100000, 100},	/* 1.8 V */
},
},
.ops = {
...
@@ -37,7 +37,7 @@ static void edp_connector_destroy(struct drm_connector *connector)
struct edp_connector *edp_connector = to_edp_connector(connector);
DBG("");
-drm_connector_unregister(connector);
+
drm_connector_cleanup(connector);
kfree(edp_connector);
@@ -124,10 +124,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
int ret;
edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
-if (!edp_connector) {
-ret = -ENOMEM;
-goto fail;
-}
+if (!edp_connector)
+return ERR_PTR(-ENOMEM);
edp_connector->edp = edp;
@@ -136,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
if (ret)
-goto fail;
+return ERR_PTR(ret);
drm_connector_helper_add(connector, &edp_connector_helper_funcs);
@@ -147,17 +145,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
-ret = drm_connector_register(connector);
-if (ret)
-goto fail;
-
drm_mode_connector_attach_encoder(connector, edp->encoder);
return connector;
-
-fail:
-if (connector)
-edp_connector_destroy(connector);
-
-return ERR_PTR(ret);
}
@@ -21,8 +21,6 @@
#include "edp.h"
#include "edp.xml.h"
-#define VDDA_MIN_UV 1800000 /* uV units */
-#define VDDA_MAX_UV 1800000 /* uV units */
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
@@ -67,7 +65,7 @@ struct edp_ctrl {
void __iomem *base;
/* regulators */
-struct regulator *vdda_vreg;
+struct regulator *vdda_vreg;	/* 1.8 V */
struct regulator *lvl_vreg;
/* clocks */
@@ -302,21 +300,24 @@ static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
static int edp_regulator_init(struct edp_ctrl *ctrl)
{
struct device *dev = &ctrl->pdev->dev;
+int ret;
DBG("");
ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
-if (IS_ERR(ctrl->vdda_vreg)) {
-pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__,
-PTR_ERR(ctrl->vdda_vreg));
+ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
+if (ret) {
+pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
+ret);
ctrl->vdda_vreg = NULL;
-return PTR_ERR(ctrl->vdda_vreg);
+return ret;
}
ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
-if (IS_ERR(ctrl->lvl_vreg)) {
-pr_err("Could not get lvl-vdd reg, %ld",
-PTR_ERR(ctrl->lvl_vreg));
+ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
+if (ret) {
+pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
+ret);
ctrl->lvl_vreg = NULL;
-return PTR_ERR(ctrl->lvl_vreg);
+return ret;
}
return 0;
@@ -326,12 +327,6 @@ static int edp_regulator_enable(struct edp_ctrl *ctrl)
{
int ret;
-ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV);
-if (ret) {
-pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret);
-goto vdda_set_fail;
-}
-
ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
if (ret < 0) {
pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
...
@@ -243,10 +243,21 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi);
/*
* hdcp
*/
+#ifdef CONFIG_DRM_MSM_HDMI_HDCP
struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi);
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi);
void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+#else
+static inline struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
+{
+return ERR_PTR(-ENXIO);
+}
+static inline void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) {}
+static inline void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+#endif
#endif /* __HDMI_CONNECTOR_H__ */
@@ -112,6 +112,9 @@ static int gpio_config(struct hdmi *hdmi, bool on)
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
struct hdmi_gpio_data gpio = config->gpios[i];
+if (gpio.num == -1)
+continue;
+
if (gpio.output) {
int value = gpio.value ? 0 : 1;
@@ -126,8 +129,10 @@ static int gpio_config(struct hdmi *hdmi, bool on)
return 0;
err:
-while (i--)
+while (i--) {
+if (config->gpios[i].num != -1)
gpio_free(config->gpios[i].num);
+}
return ret;
}
@@ -341,7 +346,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
hdp_disable(hdmi_connector);
-drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(hdmi_connector);
@@ -433,10 +437,8 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
-if (!hdmi_connector) {
-ret = -ENOMEM;
-goto fail;
-}
+if (!hdmi_connector)
+return ERR_PTR(-ENOMEM);
hdmi_connector->hdmi = hdmi;
INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
@@ -453,21 +455,13 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
-drm_connector_register(connector);
-
ret = hpd_enable(hdmi_connector);
if (ret) {
dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
-goto fail;
+return ERR_PTR(ret);
}
drm_mode_connector_attach_encoder(connector, hdmi->encoder);
return connector;
-
-fail:
-if (connector)
-hdmi_connector_destroy(connector);
-
-return ERR_PTR(ret);
}
@@ -50,30 +50,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
mdp4_kms->rev = minor;
-if (mdp4_kms->dsi_pll_vdda) {
-if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
-ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
-1200000, 1200000);
-if (ret) {
-dev_err(dev->dev,
-"failed to set dsi_pll_vdda voltage: %d\n", ret);
-goto out;
-}
-}
-}
-
-if (mdp4_kms->dsi_pll_vddio) {
-if (mdp4_kms->rev == 2) {
-ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
-1800000, 1800000);
-if (ret) {
-dev_err(dev->dev,
-"failed to set dsi_pll_vddio voltage: %d\n", ret);
-goto out;
-}
-}
-}
-
if (mdp4_kms->rev > 1) {
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
@@ -485,16 +461,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
-mdp4_kms->dsi_pll_vdda =
-devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
-if (IS_ERR(mdp4_kms->dsi_pll_vdda))
-mdp4_kms->dsi_pll_vdda = NULL;
-
-mdp4_kms->dsi_pll_vddio =
-devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
-if (IS_ERR(mdp4_kms->dsi_pll_vddio))
-mdp4_kms->dsi_pll_vddio = NULL;
-
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
...
@@ -37,8 +37,6 @@ struct mdp4_kms {
void __iomem *mmio;
-struct regulator *dsi_pll_vdda;
-struct regulator *dsi_pll_vddio;
struct regulator *vdd;
struct clk *clk;
...
@@ -48,7 +48,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
-drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(mdp4_lvds_connector);
@@ -121,13 +120,10 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct mdp4_lvds_connector *mdp4_lvds_connector;
-int ret;
mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
-if (!mdp4_lvds_connector) {
-ret = -ENOMEM;
-goto fail;
-}
+if (!mdp4_lvds_connector)
+return ERR_PTR(-ENOMEM);
mdp4_lvds_connector->encoder = encoder;
mdp4_lvds_connector->panel_node = panel_node;
@@ -143,15 +139,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
-drm_connector_register(connector);
-
drm_mode_connector_attach_encoder(connector, encoder);
return connector;
-
-fail:
-if (connector)
-mdp4_lvds_connector_destroy(connector);
-
-return ERR_PTR(ret);
}
@@ -105,6 +105,12 @@ static const struct mdp_format formats[] = {
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4,
+MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4,
+MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4,
+MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
...
@@ -18,16 +18,16 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
+#include "msm_fence.h"
struct msm_commit {
struct drm_device *dev;
struct drm_atomic_state *state;
-uint32_t fence;
-struct msm_fence_cb fence_cb;
+struct work_struct work;
uint32_t crtc_mask;
};
-static void fence_cb(struct msm_fence_cb *cb);
+static void commit_worker(struct work_struct *work);
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
@@ -69,11 +69,7 @@ static struct msm_commit *commit_init(struct drm_atomic_state *state)
c->dev = state->dev;
c->state = state;
-/* TODO we might need a way to indicate to run the cb on a
- * different wq so wait_for_vblanks() doesn't block retiring
- * bo's..
- */
-INIT_FENCE_CB(&c->fence_cb, fence_cb);
+INIT_WORK(&c->work, commit_worker);
return c;
}
@@ -114,13 +110,15 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
/* The (potentially) asynchronous part of the commit.  At this point
* nothing can fail short of armageddon.
*/
-static void complete_commit(struct msm_commit *c)
+static void complete_commit(struct msm_commit *c, bool async)
{
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
+drm_atomic_helper_wait_for_fences(dev, state);
kms->funcs->prepare_commit(kms, state);
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -153,17 +151,9 @@ static void complete_commit(struct msm_commit *c)
commit_destroy(c);
}
-static void fence_cb(struct msm_fence_cb *cb)
+static void commit_worker(struct work_struct *work)
{
-struct msm_commit *c =
-container_of(cb, struct msm_commit, fence_cb);
-complete_commit(c);
-}
-
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
-{
-struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
-c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+complete_commit(container_of(work, struct msm_commit, work), true);
}
int msm_atomic_check(struct drm_device *dev,
@@ -201,9 +191,9 @@ int msm_atomic_check(struct drm_device *dev,
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
+struct msm_drm_private *priv = dev->dev_private;
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
-ktime_t timeout;
struct msm_commit *c;
int i, ret;
@@ -237,8 +227,12 @@ int msm_atomic_commit(struct drm_device *dev,
if (!plane)
continue;
-if ((plane->state->fb != new_state->fb) && new_state->fb)
-add_fb(c, new_state->fb);
+if ((plane->state->fb != new_state->fb) && new_state->fb) {
+struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+}
}
/*
@@ -276,16 +270,11 @@ int msm_atomic_commit(struct drm_device *dev,
*/
if (nonblock) {
-msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+queue_work(priv->atomic_wq, &c->work);
return 0;
}
-timeout = ktime_add_ms(ktime_get(), 1000);
-
-/* uninterruptible wait */
-msm_wait_fence(dev, c->fence, &timeout, false);
-
-complete_commit(c);
+complete_commit(c, false);
return 0;
...
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_DEBUG_FS
#include "msm_drv.h"
#include "msm_gpu.h"
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
if (gpu) {
seq_printf(m, "%s Status:\n", gpu->name);
gpu->funcs->show(gpu, m);
}
return 0;
}
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
if (gpu) {
seq_printf(m, "Active Objects (%s):\n", gpu->name);
msm_gem_describe_objects(&gpu->active_list, m);
}
seq_printf(m, "Inactive Objects:\n");
msm_gem_describe_objects(&priv->inactive_list, m);
return 0;
}
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb, *fbdev_fb = NULL;
if (priv->fbdev) {
seq_printf(m, "fbcon ");
fbdev_fb = priv->fbdev->fb;
msm_framebuffer_describe(fbdev_fb, m);
}
mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
if (fb == fbdev_fb)
continue;
seq_printf(m, "user ");
msm_framebuffer_describe(fb, m);
}
mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
static int show_locked(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
int (*show)(struct drm_device *dev, struct seq_file *m) =
node->info_ent->data;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
ret = show(dev, m);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static struct drm_info_list msm_debugfs_list[] = {
{"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
};
static int late_init_minor(struct drm_minor *minor)
{
int ret;
if (!minor)
return 0;
ret = msm_rd_debugfs_init(minor);
if (ret) {
dev_err(minor->dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
dev_err(minor->dev->dev, "could not install perf debugfs\n");
return ret;
}
return 0;
}
int msm_debugfs_late_init(struct drm_device *dev)
{
int ret;
ret = late_init_minor(dev->primary);
if (ret)
return ret;
ret = late_init_minor(dev->render);
if (ret)
return ret;
ret = late_init_minor(dev->control);
return ret;
}
int msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
int ret;
ret = drm_debugfs_create_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list),
minor->debugfs_root, minor);
if (ret) {
dev_err(dev->dev, "could not install msm_debugfs_list\n");
return ret;
}
return 0;
}
void msm_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list), minor);
if (!minor->dev->dev_private)
return;
msm_rd_debugfs_cleanup(minor);
msm_perf_debugfs_cleanup(minor);
}
#endif
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_DEBUGFS_H__
#define __MSM_DEBUGFS_H__
#ifdef CONFIG_DEBUG_FS
int msm_debugfs_init(struct drm_minor *minor);
void msm_debugfs_cleanup(struct drm_minor *minor);
#endif
#endif /* __MSM_DEBUGFS_H__ */
This diff is collapsed.
@@ -49,6 +49,8 @@ struct msm_mmu;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct msm_fence_context;
+struct msm_fence_cb;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -100,9 +102,6 @@ struct msm_drm_private {
struct drm_fb_helper *fbdev;
-uint32_t next_fence, completed_fence;
-wait_queue_head_t fence_event;
struct msm_rd_state *rd;
struct msm_perf_state *perf;
@@ -110,9 +109,7 @@ struct msm_drm_private {
struct list_head inactive_list;
struct workqueue_struct *wq;
+struct workqueue_struct *atomic_wq;
-/* callbacks deferred until bo is inactive: */
-struct list_head fence_cbs;
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
@@ -157,20 +154,6 @@ struct msm_format {
uint32_t pixel_format;
};
-/* callback from wq once fence has passed: */
-struct msm_fence_cb {
-struct work_struct work;
-uint32_t fence;
-void (*func)(struct msm_fence_cb *cb);
-};
-
-void __msm_fence_worker(struct work_struct *work);
-
-#define INIT_FENCE_CB(_cb, _func)  do { \
-INIT_WORK(&(_cb)->work, __msm_fence_worker); \
-(_cb)->func = _func; \
-} while (0)
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
@@ -178,12 +161,7 @@ int msm_atomic_commit(struct drm_device *dev,
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
-ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct drm_device *dev,
-struct msm_fence_cb *cb, uint32_t fence);
-void msm_update_fence(struct drm_device *dev, uint32_t fence);
+void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -213,13 +191,12 @@ int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
-struct msm_fence_cb *cb);
+int msm_gem_sync_object(struct drm_gem_object *obj,
+struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
-struct msm_gpu *gpu, bool write, uint32_t fence);
+struct msm_gpu *gpu, bool exclusive, struct fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
-int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
-ktime_t *timeout);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -227,7 +204,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
-uint32_t size, struct sg_table *sgt);
+struct dma_buf *dmabuf, struct sg_table *sgt);
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
@@ -303,12 +280,6 @@ u32 msm_readl(const void __iomem *addr);
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
-{
-struct msm_drm_private *priv = dev->dev_private;
-return priv->completed_fence >= fence;
-}
-
static inline int align_pitch(int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
@@ -327,5 +298,20 @@ static inline int align_pitch(int width, int bpp)
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
+static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
+{
+ktime_t now = ktime_get();
+unsigned long remaining_jiffies;
+
+if (ktime_compare(*timeout, now) < 0) {
+remaining_jiffies = 0;
+} else {
+ktime_t rem = ktime_sub(*timeout, now);
+struct timespec ts = ktime_to_timespec(rem);
+remaining_jiffies = timespec_to_jiffies(&ts);
+}
+
+return remaining_jiffies;
+}
#endif /* __MSM_DRV_H__ */
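The timeout_to_jiffies() helper added above turns an absolute ktime_t deadline into a relative jiffies count, clamping to zero once the deadline has passed; msm_wait_fence() and msm_gem_cpu_prep() below both feed its result into timeout-based waits. A hypothetical caller (the function name and wait condition are made up for illustration):

/* Hypothetical: sleep on 'wq' until *done is set or 'ms' milliseconds elapse. */
static long example_wait_done(wait_queue_head_t *wq, bool *done, unsigned int ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), ms);

	/* returns 0 on timeout, otherwise the remaining jiffies */
	return wait_event_timeout(*wq, *done, timeout_to_jiffies(&deadline));
}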
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/fence.h>
#include "msm_drv.h"
#include "msm_fence.h"
struct msm_fence_context *
msm_fence_context_alloc(struct drm_device *dev, const char *name)
{
struct msm_fence_context *fctx;
fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
fctx->name = name;
fctx->context = fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
return fctx;
}
void msm_fence_context_free(struct msm_fence_context *fctx)
{
kfree(fctx);
}
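/* The signed-difference check in fence_completed() below is wraparound-safe:
 * with completed_fence = 0x00000002 and fence = 0xfffffffe the u32 subtraction
 * yields 4, which is >= 0 as an int32_t, so the fence correctly reads as
 * completed even though the 32-bit seqno counter has wrapped; a plain
 * 'completed_fence >= fence' compare would get this case wrong.
 */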
static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
return (int32_t)(fctx->completed_fence - fence) >= 0;
}
/* legacy path for WAIT_FENCE ioctl: */
int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
ktime_t *timeout, bool interruptible)
{
int ret;
if (fence > fctx->last_fence) {
DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
fctx->name, fence, fctx->last_fence);
return -EINVAL;
}
if (!timeout) {
/* no-wait: */
ret = fence_completed(fctx, fence) ? 0 : -EBUSY;
} else {
unsigned long remaining_jiffies = timeout_to_jiffies(timeout);
if (interruptible)
ret = wait_event_interruptible_timeout(fctx->event,
fence_completed(fctx, fence),
remaining_jiffies);
else
ret = wait_event_timeout(fctx->event,
fence_completed(fctx, fence),
remaining_jiffies);
if (ret == 0) {
DBG("timeout waiting for fence: %u (completed: %u)",
fence, fctx->completed_fence);
ret = -ETIMEDOUT;
} else if (ret != -ERESTARTSYS) {
ret = 0;
}
}
return ret;
}
/* called from workqueue */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
spin_lock(&fctx->spinlock);
fctx->completed_fence = max(fence, fctx->completed_fence);
spin_unlock(&fctx->spinlock);
wake_up_all(&fctx->event);
}
struct msm_fence {
struct msm_fence_context *fctx;
struct fence base;
};
static inline struct msm_fence *to_msm_fence(struct fence *fence)
{
return container_of(fence, struct msm_fence, base);
}
static const char *msm_fence_get_driver_name(struct fence *fence)
{
return "msm";
}
static const char *msm_fence_get_timeline_name(struct fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return f->fctx->name;
}
static bool msm_fence_enable_signaling(struct fence *fence)
{
return true;
}
static bool msm_fence_signaled(struct fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return fence_completed(f->fctx, f->base.seqno);
}
static void msm_fence_release(struct fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
kfree_rcu(f, base.rcu);
}
static const struct fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
.wait = fence_default_wait,
.release = msm_fence_release,
};
struct fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
struct msm_fence *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return ERR_PTR(-ENOMEM);
f->fctx = fctx;
fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
fctx->context, ++fctx->last_fence);
return &f->base;
}
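Taken together with the adreno_submit() changes earlier in this merge (which OUT_RING submit->fence->seqno), the intended flow is: allocate one fence per submit, let the GPU write the seqno back, and have the retire path call msm_update_fence(). A rough sketch under those assumptions (the example_* names are placeholders, not functions from the patches):

/* Rough sketch, not the actual msm_gpu.c code. */
static struct fence *example_begin_submit(struct msm_fence_context *fctx)
{
	struct fence *f = msm_fence_alloc(fctx);

	if (IS_ERR(f))
		return f;

	/* f->seqno is what gets written to the ring (CACHE_FLUSH_TS) and
	 * later read back from memptrs by the retire path.
	 */
	return f;
}

static void example_retire(struct msm_fence_context *fctx, uint32_t hw_seqno)
{
	/* wakes msm_wait_fence() waiters and makes msm_fence_signaled()
	 * return true for this seqno and everything older
	 */
	msm_update_fence(fctx, hw_seqno);
}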
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_FENCE_H__
#define __MSM_FENCE_H__
#include "msm_drv.h"
struct msm_fence_context {
struct drm_device *dev;
const char *name;
unsigned context;
/* last_fence == completed_fence --> no pending work */
uint32_t last_fence; /* last assigned fence */
uint32_t completed_fence; /* last completed fence */
wait_queue_head_t event;
spinlock_t spinlock;
};
struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);
int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
#endif
@@ -21,6 +21,7 @@
#include <linux/pfn_t.h>
#include "msm_drv.h"
+#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
@@ -410,27 +411,62 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
return ret;
}
-/* setup callback for when bo is no longer busy..
- * TODO probably want to differentiate read vs write..
- */
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
-struct msm_fence_cb *cb)
+/* must be called before _move_to_active().. */
+int msm_gem_sync_object(struct drm_gem_object *obj,
+struct msm_fence_context *fctx, bool exclusive)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
-uint32_t fence = msm_gem_fence(msm_obj,
-MSM_PREP_READ | MSM_PREP_WRITE);
-return msm_queue_fence_cb(obj->dev, cb, fence);
+struct reservation_object_list *fobj;
+struct fence *fence;
+int i, ret;
+
+if (!exclusive) {
+/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
+ * which makes this a slightly strange place to call it.  OTOH this
+ * is a convenient can-fail point to hook it in.  (And similar to
+ * how etnaviv and nouveau handle this.)
+ */
+ret = reservation_object_reserve_shared(msm_obj->resv);
+if (ret)
+return ret;
+}
+
+fobj = reservation_object_get_list(msm_obj->resv);
+if (!fobj || (fobj->shared_count == 0)) {
+fence = reservation_object_get_excl(msm_obj->resv);
+/* don't need to wait on our own fences, since ring is fifo */
+if (fence && (fence->context != fctx->context)) {
+ret = fence_wait(fence, true);
+if (ret)
+return ret;
+}
+}
+
+if (!exclusive || !fobj)
+return 0;
+
+for (i = 0; i < fobj->shared_count; i++) {
+fence = rcu_dereference_protected(fobj->shared[i],
+reservation_object_held(msm_obj->resv));
+if (fence->context != fctx->context) {
+ret = fence_wait(fence, true);
+if (ret)
+return ret;
+}
+}
+
+return 0;
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
-struct msm_gpu *gpu, bool write, uint32_t fence)
+struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_obj->gpu = gpu;
-if (write)
-msm_obj->write_fence = fence;
+if (exclusive)
+reservation_object_add_excl_fence(msm_obj->resv, fence);
else
-msm_obj->read_fence = fence;
+reservation_object_add_shared_fence(msm_obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -444,30 +480,30 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
msm_obj->gpu = NULL;
-msm_obj->read_fence = 0;
-msm_obj->write_fence = 0;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
-struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
-int ret = 0;
+bool write = !!(op & MSM_PREP_WRITE);
-if (is_active(msm_obj)) {
-uint32_t fence = msm_gem_fence(msm_obj, op);
-
-if (op & MSM_PREP_NOSYNC)
-timeout = NULL;
-
-ret = msm_wait_fence(dev, fence, timeout, true);
+if (op & MSM_PREP_NOSYNC) {
+if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
+return -EBUSY;
+} else {
+int ret;
+
+ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+true, timeout_to_jiffies(timeout));
+if (ret <= 0)
+return ret == 0 ? -ETIMEDOUT : ret;
}
/* TODO cache maintenance */
-return ret;
+return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
@@ -477,18 +513,46 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
+static void describe_fence(struct fence *fence, const char *type,
+struct seq_file *m)
+{
+if (!fence_is_signaled(fence))
+seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+fence->ops->get_driver_name(fence),
+fence->ops->get_timeline_name(fence),
+fence->seqno);
+}
+
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
-struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+struct reservation_object *robj = msm_obj->resv;
+struct reservation_object_list *fobj;
+struct fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
-WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-msm_obj->read_fence, msm_obj->write_fence,
obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr, obj->size);
+
+rcu_read_lock();
+fobj = rcu_dereference(robj->fence);
+if (fobj) {
+unsigned int i, shared_count = fobj->shared_count;
+
+for (i = 0; i < shared_count; i++) {
+fence = rcu_dereference(fobj->shared[i]);
+describe_fence(fence, "Shared", m);
+}
+}
+
+fence = rcu_dereference(robj->fence_excl);
+if (fence)
+describe_fence(fence, "Exclusive", m);
+rcu_read_unlock();
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -583,6 +647,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
+struct reservation_object *resv,
struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -622,8 +687,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
+if (resv) {
+msm_obj->resv = resv;
+} else {
msm_obj->resv = &msm_obj->_resv;
reservation_object_init(msm_obj->resv);
+}
INIT_LIST_HEAD(&msm_obj->submit_entry);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
@@ -643,7 +712,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
-ret = msm_gem_new_impl(dev, size, flags, &obj);
+ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
if (ret)
goto fail;
@@ -665,10 +734,11 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
-uint32_t size, struct sg_table *sgt)
+struct dma_buf *dmabuf, struct sg_table *sgt)
{
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
+uint32_t size;
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
@@ -677,9 +747,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
-size = PAGE_ALIGN(size);
+size = PAGE_ALIGN(dmabuf->size);
-ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
if (ret)
goto fail;
...
@@ -39,7 +39,6 @@ struct msm_gem_object {
 	 */
 	struct list_head mm_list;
 	struct msm_gpu *gpu;     /* non-null if active */
-	uint32_t read_fence, write_fence;
 
 	/* Transiently in the process of submit ioctl, objects associated
 	 * with the submit are on submit->bo_list.. this only lasts for
@@ -73,19 +72,6 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 	return msm_obj->gpu != NULL;
 }
 
-static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
-		uint32_t op)
-{
-	uint32_t fence = 0;
-	if (op & MSM_PREP_READ)
-		fence = msm_obj->write_fence;
-	if (op & MSM_PREP_WRITE)
-		fence = max(fence, msm_obj->read_fence);
-	return fence;
-}
-
 #define MAX_CMDS 4
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -99,8 +85,9 @@ struct msm_gem_submit {
 	struct list_head node;   /* node in gpu submit_list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
-	uint32_t fence;
-	bool valid;
+	struct fence *fence;
+	struct pid *pid;    /* submitting process */
+	bool valid;         /* true if no cmdstream patching needed */
 	unsigned int nr_cmds;
 	unsigned int nr_bos;
 	struct {
......
@@ -55,7 +55,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
-	return msm_gem_import(dev, attach->dmabuf->size, sg);
+	return msm_gem_import(dev, attach->dmabuf, sg);
 }
 
 int msm_gem_prime_pin(struct drm_gem_object *obj)
......
@@ -24,7 +24,7 @@
  */
 
 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID    0x8000
+#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
 #define BO_LOCKED   0x4000
 #define BO_PINNED   0x2000
 
@@ -40,9 +40,12 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 	int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
 
 	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	if (submit) {
+	if (!submit)
+		return NULL;
+
 	submit->dev = dev;
 	submit->gpu = gpu;
+	submit->pid = get_pid(task_pid(current));
 
 	/* initially, until copy_from_user() and bo lookup succeeds: */
 	submit->nr_bos = 0;
@@ -50,11 +53,18 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
 	INIT_LIST_HEAD(&submit->bo_list);
 	ww_acquire_init(&submit->ticket, &reservation_ww_class);
-	}
 
 	return submit;
 }
 
+void msm_gem_submit_free(struct msm_gem_submit *submit)
+{
+	fence_put(submit->fence);
+	list_del(&submit->node);
+	put_pid(submit->pid);
+	kfree(submit);
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -136,16 +146,13 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
 }
 
 /* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_validate_objects(struct msm_gem_submit *submit)
+static int submit_lock_objects(struct msm_gem_submit *submit)
 {
 	int contended, slow_locked = -1, i, ret = 0;
 
 retry:
-	submit->valid = true;
-
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		uint32_t iova;
 
 		if (slow_locked == i)
 			slow_locked = -1;
@@ -159,30 +166,6 @@ static int submit_validate_objects(struct msm_gem_submit *submit)
 				goto fail;
 			submit->bos[i].flags |= BO_LOCKED;
 		}
-
-		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->id, &iova);
-
-		/* this would break the logic in the fail path.. there is no
-		 * reason for this to happen, but just to be on the safe side
-		 * let's notice if this starts happening in the future:
-		 */
-		WARN_ON(ret == -EDEADLK);
-
-		if (ret)
-			goto fail;
-
-		submit->bos[i].flags |= BO_PINNED;
-
-		if (iova == submit->bos[i].iova) {
-			submit->bos[i].flags |= BO_VALID;
-		} else {
-			submit->bos[i].iova = iova;
-			submit->bos[i].flags &= ~BO_VALID;
-			submit->valid = false;
-		}
 	}
 
 	ww_acquire_done(&submit->ticket);
@@ -211,6 +194,54 @@ static int submit_validate_objects(struct msm_gem_submit *submit)
 	return ret;
 }
+static int submit_fence_sync(struct msm_gem_submit *submit)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+
+		ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int submit_pin_objects(struct msm_gem_submit *submit)
+{
+	int i, ret = 0;
+
+	submit->valid = true;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		uint32_t iova;
+
+		/* if locking succeeded, pin bo: */
+		ret = msm_gem_get_iova_locked(&msm_obj->base,
+				submit->gpu->id, &iova);
+		if (ret)
+			break;
+
+		submit->bos[i].flags |= BO_PINNED;
+
+		if (iova == submit->bos[i].iova) {
+			submit->bos[i].flags |= BO_VALID;
+		} else {
+			submit->bos[i].iova = iova;
+			/* iova changed, so address in cmdstream is not valid: */
+			submit->bos[i].flags &= ~BO_VALID;
+			submit->valid = false;
+		}
+	}
+
+	return ret;
+}
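
Note: submit_fence_sync() above depends on msm_gem_sync_object(), which this series adds in msm_gem.c but which is not part of this excerpt. The sketch below is a rough illustration of the intended behaviour, waiting on fences attached to the bo's reservation object while skipping fences from the GPU's own fence context (the fctx->context field is an assumption here); it is not the driver's actual implementation:

static int example_sync_object(struct msm_gem_object *msm_obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* about to add a shared fence, make sure there is room for it: */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* fences from our own ring are already ordered by submission: */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
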
 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
 		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
 {
@@ -302,7 +333,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	return 0;
 }
 
-static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+static void submit_cleanup(struct msm_gem_submit *submit)
 {
 	unsigned i;
 
@@ -349,7 +380,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = submit_validate_objects(submit);
+	ret = submit_lock_objects(submit);
+	if (ret)
+		goto out;
+
+	ret = submit_fence_sync(submit);
+	if (ret)
+		goto out;
+
+	ret = submit_pin_objects(submit);
 	if (ret)
 		goto out;
 
@@ -415,10 +454,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 	ret = msm_gpu_submit(gpu, submit, ctx);
 
-	args->fence = submit->fence;
+	args->fence = submit->fence->seqno;
 
 out:
-	submit_cleanup(submit, !!ret);
+	submit_cleanup(submit);
+	if (ret)
+		msm_gem_submit_free(submit);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
@@ -18,6 +18,7 @@
 #include "msm_gpu.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
+#include "msm_fence.h"
 
 /*
@@ -265,22 +266,38 @@ static void inactive_start(struct msm_gpu *gpu)
  * Hangcheck detection for locked gpu:
  */
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
+static void retire_submits(struct msm_gpu *gpu);
 
 static void recover_worker(struct work_struct *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 	struct drm_device *dev = gpu->dev;
+	struct msm_gem_submit *submit;
+	uint32_t fence = gpu->funcs->last_fence(gpu);
 
-	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+	msm_update_fence(gpu->fctx, fence + 1);
 
 	mutex_lock(&dev->struct_mutex);
-	if (msm_gpu_active(gpu)) {
-		struct msm_gem_submit *submit;
-		uint32_t fence = gpu->funcs->last_fence(gpu);
 
+	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+	list_for_each_entry(submit, &gpu->submit_list, node) {
+		if (submit->fence->seqno == (fence + 1)) {
+			struct task_struct *task;
+
+			rcu_read_lock();
+			task = pid_task(submit->pid, PIDTYPE_PID);
+			if (task) {
+				dev_err(dev->dev, "%s: offending task: %s\n",
+						gpu->name, task->comm);
+			}
+			rcu_read_unlock();
+			break;
+		}
+	}
+
+	if (msm_gpu_active(gpu)) {
 		/* retire completed submits, plus the one that hung: */
-		retire_submits(gpu, fence + 1);
+		retire_submits(gpu);
 
 		inactive_cancel(gpu);
 		gpu->funcs->recover(gpu);
@@ -290,6 +307,7 @@ static void recover_worker(struct work_struct *work)
 			gpu->funcs->submit(gpu, submit, NULL);
 		}
 	}
+
 	mutex_unlock(&dev->struct_mutex);
 
 	msm_gpu_retire(gpu);
@@ -312,7 +330,7 @@ static void hangcheck_handler(unsigned long data)
 	if (fence != gpu->hangcheck_fence) {
 		/* some progress has been made.. ya! */
 		gpu->hangcheck_fence = fence;
-	} else if (fence < gpu->submitted_fence) {
+	} else if (fence < gpu->fctx->last_fence) {
 		/* no progress and not done.. hung! */
 		gpu->hangcheck_fence = fence;
 		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
@@ -320,12 +338,12 @@ static void hangcheck_handler(unsigned long data)
 		dev_err(dev->dev, "%s: completed fence: %u\n",
 				gpu->name, fence);
 		dev_err(dev->dev, "%s: submitted fence: %u\n",
-				gpu->name, gpu->submitted_fence);
+				gpu->name, gpu->fctx->last_fence);
 		queue_work(priv->wq, &gpu->recover_work);
 	}
 
 	/* if still more pending work, reset the hangcheck timer: */
-	if (gpu->submitted_fence > gpu->hangcheck_fence)
+	if (gpu->fctx->last_fence > gpu->hangcheck_fence)
 		hangcheck_timer_reset(gpu);
 
 	/* workaround for missing irq: */
@@ -431,7 +449,22 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
  * Cmdstream submission/retirement:
  */
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
+static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+	int i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		/* move to inactive: */
+		msm_gem_move_to_inactive(&msm_obj->base);
+		msm_gem_put_iova(&msm_obj->base, gpu->id);
+		drm_gem_object_unreference(&msm_obj->base);
+	}
+
+	msm_gem_submit_free(submit);
+}
+
+static void retire_submits(struct msm_gpu *gpu)
 {
 	struct drm_device *dev = gpu->dev;
 
@@ -443,9 +476,8 @@ static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
 		submit = list_first_entry(&gpu->submit_list,
 				struct msm_gem_submit, node);
 
-		if (submit->fence <= fence) {
-			list_del(&submit->node);
-			kfree(submit);
+		if (fence_is_signaled(submit->fence)) {
+			retire_submit(gpu, submit);
 		} else {
 			break;
 		}
@@ -458,29 +490,10 @@ static void retire_worker(struct work_struct *work)
 	struct drm_device *dev = gpu->dev;
 	uint32_t fence = gpu->funcs->last_fence(gpu);
 
-	msm_update_fence(gpu->dev, fence);
+	msm_update_fence(gpu->fctx, fence);
 
 	mutex_lock(&dev->struct_mutex);
-
-	retire_submits(gpu, fence);
-
-	while (!list_empty(&gpu->active_list)) {
-		struct msm_gem_object *obj;
-		obj = list_first_entry(&gpu->active_list,
-				struct msm_gem_object, mm_list);
-		if ((obj->read_fence <= fence) &&
-				(obj->write_fence <= fence)) {
-			/* move to inactive: */
-			msm_gem_move_to_inactive(&obj->base);
-			msm_gem_put_iova(&obj->base, gpu->id);
-			drm_gem_object_unreference(&obj->base);
-		} else {
-			break;
-		}
-	}
-
+	retire_submits(gpu);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!msm_gpu_active(gpu))
@@ -505,9 +518,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	submit->fence = ++priv->next_fence;
-	gpu->submitted_fence = submit->fence;
+	submit->fence = msm_fence_alloc(gpu->fctx);
+	if (IS_ERR(submit->fence)) {
+		ret = PTR_ERR(submit->fence);
+		submit->fence = NULL;
+		return ret;
+	}
 
 	inactive_cancel(gpu);
 
@@ -515,40 +531,34 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 	msm_rd_dump_submit(submit);
 
-	gpu->submitted_fence = submit->fence;
-
 	update_sw_cntrs(gpu);
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		uint32_t iova;
 
 		/* can't happen yet.. but when we add 2d support we'll have
 		 * to deal w/ cross-ring synchronization:
 		 */
 		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
 
-		if (!is_active(msm_obj)) {
-			uint32_t iova;
-			/* ring takes a reference to the bo and iova: */
+		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_reference(&msm_obj->base);
 		msm_gem_get_iova_locked(&msm_obj->base,
 				submit->gpu->id, &iova);
-		}
-
-		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
 	}
 
-	ret = gpu->funcs->submit(gpu, submit, ctx);
+	gpu->funcs->submit(gpu, submit, ctx);
 	priv->lastctx = ctx;
 
 	hangcheck_timer_reset(gpu);
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -580,6 +590,12 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->funcs = funcs;
 	gpu->name = name;
 	gpu->inactive = true;
+	gpu->fctx = msm_fence_context_alloc(drm, name);
+	if (IS_ERR(gpu->fctx)) {
+		ret = PTR_ERR(gpu->fctx);
+		gpu->fctx = NULL;
+		goto fail;
+	}
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	INIT_WORK(&gpu->retire_work, retire_worker);
@@ -700,4 +716,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 
 	if (gpu->mmu)
 		gpu->mmu->funcs->destroy(gpu->mmu);
+
+	if (gpu->fctx)
+		msm_fence_context_free(gpu->fctx);
 }
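
Note: several hunks above rely on the msm_fence_context introduced by this series in msm_fence.{c,h}, which is not included in this excerpt. Based only on how the API is used here, its shape is roughly the following; the structure layout and the body of msm_update_fence() are best-effort assumptions, not the actual code:

struct msm_fence_context {
	struct drm_device *dev;
	const char *name;
	unsigned context;          /* from fence_context_alloc() */
	uint32_t last_fence;       /* last assigned (submitted) seqno */
	uint32_t completed_fence;  /* last completed seqno */
	wait_queue_head_t event;
	spinlock_t spinlock;
};

/* called from the retire/recover paths to mark seqnos up to 'fence' as done: */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
	spin_lock(&fctx->spinlock);
	fctx->completed_fence = max(fence, fctx->completed_fence);
	spin_unlock(&fctx->spinlock);

	wake_up_all(&fctx->event);
}
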
@@ -22,6 +22,7 @@
 #include <linux/regulator/consumer.h>
 
 #include "msm_drv.h"
+#include "msm_fence.h"
 #include "msm_ringbuffer.h"
 
 struct msm_gem_submit;
@@ -46,7 +47,7 @@ struct msm_gpu_funcs {
 	int (*hw_init)(struct msm_gpu *gpu);
 	int (*pm_suspend)(struct msm_gpu *gpu);
 	int (*pm_resume)(struct msm_gpu *gpu);
-	int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 			struct msm_file_private *ctx);
 	void (*flush)(struct msm_gpu *gpu);
 	void (*idle)(struct msm_gpu *gpu);
@@ -77,13 +78,15 @@ struct msm_gpu {
 	const struct msm_gpu_perfcntr *perfcntrs;
 	uint32_t num_perfcntrs;
 
+	/* ringbuffer: */
 	struct msm_ringbuffer *rb;
 	uint32_t rb_iova;
 
 	/* list of GEM active objects: */
 	struct list_head active_list;
 
-	uint32_t submitted_fence;
+	/* fencing: */
+	struct msm_fence_context *fctx;
 
 	/* is gpu powered/active? */
 	int active_cnt;
@@ -125,7 +128,7 @@ struct msm_gpu {
 
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
-	return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+	return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
 }
 
 /* Perf-Counters:
......
@@ -296,7 +296,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 
 	n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
 			TASK_COMM_LEN, current->comm, task_pid_nr(current),
-			submit->fence);
+			submit->fence->seqno);
 
 	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
......
@@ -120,6 +120,24 @@ reservation_object_get_excl(struct reservation_object *obj)
 					 reservation_object_held(obj));
 }
 
+static inline struct fence *
+reservation_object_get_excl_rcu(struct reservation_object *obj)
+{
+	struct fence *fence;
+	unsigned seq;
+retry:
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+	fence = rcu_dereference(obj->fence_excl);
+	if (read_seqcount_retry(&obj->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+	fence = fence_get(fence);
+	rcu_read_unlock();
+	return fence;
+}
+
 int reservation_object_reserve_shared(struct reservation_object *obj);
 void reservation_object_add_shared_fence(struct reservation_object *obj,
 				struct fence *fence);
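
Note: reservation_object_get_excl_rcu() lets a caller take a reference to the exclusive fence without holding the reservation ww_mutex, retrying via the seqcount if the fence is replaced concurrently. A hypothetical caller (for illustration only, not part of this series) could use it like this:

static int wait_for_exclusive_fence(struct reservation_object *resv)
{
	struct fence *excl;
	long ret;

	excl = reservation_object_get_excl_rcu(resv);
	if (excl) {
		/* interruptible wait with a 100ms timeout */
		ret = fence_wait_timeout(excl, true, msecs_to_jiffies(100));
		fence_put(excl);
		if (ret == 0)
			return -ETIMEDOUT;
		if (ret < 0)
			return ret;
	}

	return 0;
}
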
......