Commit 148fb2e2 authored by Dave Airlie

Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm-next

ttm:
- Replace ref/unref naming with get/put

amdgpu:
- Revert DC clang fix; it causes a segfault with some compiler versions
- SR-IOV fix
- PCIE fix for vega20
- Misc DC fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201062345.7304-1-alexander.deucher@amd.com
parents 37fdaa33 47dd8048
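
The ttm ref/unref-to-get/put rename that runs through the diff below is mechanical. A minimal before/after sketch in hypothetical driver code (not taken from this commit):

	/* Before: deprecated wrappers, removed at the end of this diff. */
	struct ttm_buffer_object *ref = ttm_bo_reference(bo); /* kref_get(), returns bo */
	ttm_bo_unref(&bo);                                    /* drops the ref, NULLs the pointer */

	/* After: plain get/put, matching kref naming elsewhere in the kernel. */
	ttm_bo_get(bo);
	ttm_bo_put(bo);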
......@@ -91,10 +91,6 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
else
dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
adev->gmc.xgmi.physical_node_id,
adev->gmc.xgmi.hive_id);
return ret;
}
......@@ -160,6 +156,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
break;
}
dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
mutex_unlock(&hive->hive_lock);
exit:
return ret;
......
......@@ -965,7 +965,11 @@ static int gmc_v9_0_sw_init(void *handle)
* vm size is 256TB (48bit), maximum size of Vega10,
* block size 512 (9bit)
*/
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
/* Under SR-IOV, restrict max_pfn below AMDGPU_GMC_HOLE. */
if (amdgpu_sriov_vf(adev))
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break;
default:
break;
......
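The arithmetic behind the 47-vs-48-bit split, for reference (the hole boundary is quoted from the driver's address-space layout as best recalled, so treat the exact constant as an assumption):

	48-bit VM size: 2^48 = 256 TB of VA, reaching into the GMC hole
	47-bit VM size: 2^47 = 128 TB of VA, top page just below the
	                AMDGPU_GMC_HOLE start (0x0000_8000_0000_0000, i.e. bit 47)

Capping the size at 47 bits under SR-IOV therefore keeps max_pfn entirely below the hole.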
......@@ -4658,8 +4658,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
flip = kzalloc(sizeof(*flip), GFP_KERNEL);
full = kzalloc(sizeof(*full), GFP_KERNEL);
if (!flip || !full)
if (!flip || !full) {
dm_error("Failed to allocate update bundles\n");
goto cleanup;
}
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
......@@ -4883,6 +4885,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_state);
mutex_unlock(&dm->dc_lock);
}
cleanup:
kfree(flip);
kfree(full);
}
/*
......@@ -4917,10 +4923,25 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
if (drm_atomic_crtc_needs_modeset(new_crtc_state)
&& dm_old_crtc_state->stream) {
/*
* If the stream is removed and CRC capture was
* enabled on the CRTC the extra vblank reference
* needs to be dropped since CRC capture will be
* disabled.
*/
if (!dm_new_crtc_state->stream
&& dm_new_crtc_state->crc_enabled) {
drm_crtc_vblank_put(crtc);
dm_new_crtc_state->crc_enabled = false;
}
manage_dm_interrupts(adev, acrtc, false);
}
}
/*
* Add check here for SoC's that support hardware cursor plane, to
......@@ -5152,6 +5173,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
continue;
manage_dm_interrupts(adev, acrtc, true);
/* The stream has changed, so CRC capture needs to be re-enabled. */
if (dm_new_crtc_state->crc_enabled)
amdgpu_dm_crtc_set_crc_source(crtc, "auto");
}
/* update planes when needed per CRTC */
......
......@@ -64,8 +64,10 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
struct amdgpu_device *adev = crtc->dev->dev_private;
struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_state *stream_state = crtc_state->stream;
bool enable;
enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
......@@ -80,29 +82,33 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
return -EINVAL;
}
/* When enabling CRC, we should also disable dithering. */
if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
if (dc_stream_configure_crc(stream_state->ctx->dc,
stream_state,
true, true)) {
crtc_state->crc_enabled = true;
dc_stream_set_dither_option(stream_state,
DITHER_OPTION_TRUN8);
}
else
return -EINVAL;
} else {
if (dc_stream_configure_crc(stream_state->ctx->dc,
stream_state,
false, false)) {
crtc_state->crc_enabled = false;
dc_stream_set_dither_option(stream_state,
DITHER_OPTION_DEFAULT);
}
else
return -EINVAL;
enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
mutex_lock(&adev->dm.dc_lock);
if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
enable, enable)) {
mutex_unlock(&adev->dm.dc_lock);
return -EINVAL;
}
/* When enabling CRC, we should also disable dithering. */
dc_stream_set_dither_option(stream_state,
enable ? DITHER_OPTION_TRUN8
: DITHER_OPTION_DEFAULT);
mutex_unlock(&adev->dm.dc_lock);
/*
* Reading the CRC requires the vblank interrupt handler to be
* enabled. Keep a reference until CRC capture stops.
*/
if (!crtc_state->crc_enabled && enable)
drm_crtc_vblank_get(crtc);
else if (crtc_state->crc_enabled && !enable)
drm_crtc_vblank_put(crtc);
crtc_state->crc_enabled = enable;
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
return 0;
......
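A sketch of the reference pairing the rewritten function enforces, from a hypothetical caller's point of view (disabling via a NULL source name is assumed here, following the usual DRM CRC convention):

	amdgpu_dm_crtc_set_crc_source(crtc, "auto"); /* enable: takes drm_crtc_vblank_get() */
	/* ... CRCs are read from the vblank interrupt handler ... */
	amdgpu_dm_crtc_set_crc_source(crtc, NULL);   /* disable: balanced by drm_crtc_vblank_put() */

The transition checks on crc_enabled keep repeated enables or disables from unbalancing the vblank reference count.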
......@@ -263,6 +263,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
return true;
}
/*
* Poll the pending down reply before clearing the payload allocation table.
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
const struct dc_link *link)
{}
/*
* Clear the payload allocation table before enabling the MST DP link.
......
......@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-alignment=16),)
cc_stack_align := -mstack-alignment=16
endif
calcs_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
calcs_ccflags := -mhard-float -msse $(cc_stack_align)
CFLAGS_dcn_calcs.o := $(calcs_ccflags)
CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
......
......@@ -1463,11 +1463,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->adjust->v_total_min,
stream_update->adjust->v_total_max);
if (stream_update->periodic_fn_vsync_delta &&
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
if (stream_update->vline0_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
pipe_ctx->stream->periodic_fn_vsync_delta);
pipe_ctx->stream_res.tg, VLINE0, stream->vline0_config);
if (stream_update->vline1_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
pipe_ctx->stream_res.tg, VLINE1, stream->vline1_config);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
......
......@@ -1467,6 +1467,11 @@ static enum dc_status enable_link_dp_mst(
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
return DC_OK;
/* Make sure the pending down reply can be processed
 * before the payload table is cleared.
*/
dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
/* clear payload table */
dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
......
......@@ -45,6 +45,11 @@ struct freesync_context {
bool dummy;
};
struct vline_config {
unsigned int start_line;
unsigned int end_line;
};
struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
......@@ -85,8 +90,6 @@ struct dc_stream_state {
uint8_t qs_bit;
uint8_t qy_bit;
unsigned long long periodic_fn_vsync_delta;
/* TODO: custom INFO packets */
/* TODO: ABM info (DMCU) */
/* PSR info */
......@@ -96,6 +99,9 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
struct vline_config vline0_config;
struct vline_config vline1_config;
/* from core_stream struct */
struct dc_context *ctx;
......@@ -143,7 +149,9 @@ struct dc_stream_update {
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
unsigned long long *periodic_fn_vsync_delta;
struct vline_config *vline0_config;
struct vline_config *vline1_config;
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
......
......@@ -92,68 +92,26 @@ static void optc1_disable_stereo(struct timing_generator *optc)
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}
static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
{
struct dc_crtc_timing patched_crtc_timing;
int vesa_sync_start;
int asic_blank_end;
int vertical_line_start;
patched_crtc_timing = *dc_crtc_timing;
optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
vesa_sync_start = patched_crtc_timing.v_addressable +
patched_crtc_timing.v_border_bottom +
patched_crtc_timing.v_front_porch;
asic_blank_end = (patched_crtc_timing.v_total -
vesa_sync_start -
patched_crtc_timing.v_border_top);
vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
if (vertical_line_start < 0)
vertical_line_start = 0;
return vertical_line_start;
}
void optc1_program_vline_interrupt(
struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
unsigned long long vsync_delta)
enum vline_select vline,
struct vline_config vline_config)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_100hz + 999), 1000);
uint32_t req_delta_lines = (uint32_t) div64_u64(
(req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
dc_crtc_timing->h_total);
uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
uint32_t start_line = 0;
uint32_t end_line = 0;
if (req_delta_lines != 0)
req_delta_lines--;
if (req_delta_lines > vsync_line)
start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) + 2;
else
start_line = vsync_line - req_delta_lines;
end_line = start_line + 2;
if (start_line >= dc_crtc_timing->v_total)
start_line = start_line % dc_crtc_timing->v_total;
if (end_line >= dc_crtc_timing->v_total)
end_line = end_line % dc_crtc_timing->v_total;
REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
switch (vline) {
case VLINE0:
REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
OTG_VERTICAL_INTERRUPT0_LINE_START, vline_config.start_line,
OTG_VERTICAL_INTERRUPT0_LINE_END, vline_config.end_line);
break;
case VLINE1:
REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
OTG_VERTICAL_INTERRUPT1_LINE_START, vline_config.start_line);
break;
default:
break;
}
}
/**
......
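With the delta-to-lines conversion gone from the OPTC, callers now pass a precomputed scanline window. A hypothetical call under the new signature (the line values are arbitrary):

	struct vline_config cfg = { .start_line = 80, .end_line = 82 };

	if (pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
		pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
				pipe_ctx->stream_res.tg, VLINE0, cfg);

VLINE1 programs only a start line, matching the single-field register write in the switch above.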
......@@ -67,6 +67,8 @@
SRI(OTG_CLOCK_CONTROL, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
......@@ -135,6 +137,8 @@ struct dcn_optc_registers {
uint32_t OTG_CLOCK_CONTROL;
uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL;
uint32_t OTG_VERTICAL_INTERRUPT1_POSITION;
uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
uint32_t OPTC_INPUT_CLOCK_CONTROL;
......@@ -227,6 +231,8 @@ struct dcn_optc_registers {
SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
......@@ -361,6 +367,8 @@ struct dcn_optc_registers {
type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\
type OTG_VERTICAL_INTERRUPT0_LINE_START;\
type OTG_VERTICAL_INTERRUPT0_LINE_END;\
type OTG_VERTICAL_INTERRUPT1_INT_ENABLE;\
type OTG_VERTICAL_INTERRUPT1_LINE_START;\
type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
type OTG_VERTICAL_INTERRUPT2_LINE_START;\
type OPTC_INPUT_CLK_EN;\
......@@ -476,8 +484,8 @@ void optc1_program_timing(
bool use_vbios);
void optc1_program_vline_interrupt(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
unsigned long long vsync_delta);
enum vline_select vline,
struct vline_config vline_config);
void optc1_program_global_sync(
struct timing_generator *optc);
......
......@@ -57,6 +57,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
struct dp_mst_stream_allocation_table *proposed_table,
bool enable);
/*
* Poll the pending down reply before clearing the payload allocation table.
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
const struct dc_link *link);
/*
* Clear the payload allocation table before enabling the MST DP link.
*/
......
......@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-alignment=16),)
cc_stack_align := -mstack-alignment=16
endif
dml_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
dml_ccflags := -mhard-float -msse $(cc_stack_align)
CFLAGS_display_mode_lib.o := $(dml_ccflags)
CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
......
......@@ -134,6 +134,15 @@ struct dc_crtc_timing;
struct drr_params;
struct vline_config;
enum vline_select {
VLINE0,
VLINE1,
VLINE2
};
struct timing_generator_funcs {
bool (*validate_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing);
......@@ -141,8 +150,8 @@ struct timing_generator_funcs {
const struct dc_crtc_timing *timing,
bool use_vbios);
void (*program_vline_interrupt)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
unsigned long long vsync_delta);
enum vline_select vline,
struct vline_config vline_config);
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
......
......@@ -144,6 +144,14 @@ enum dc_irq_source {
DC_IRQ_SOURCE_DC5_VLINE0,
DC_IRQ_SOURCE_DC6_VLINE0,
DC_IRQ_SOURCE_DC1_VLINE1,
DC_IRQ_SOURCE_DC2_VLINE1,
DC_IRQ_SOURCE_DC3_VLINE1,
DC_IRQ_SOURCE_DC4_VLINE1,
DC_IRQ_SOURCE_DC5_VLINE1,
DC_IRQ_SOURCE_DC6_VLINE1,
DAL_IRQ_SOURCES_NUMBER
};
......
......@@ -771,6 +771,47 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t pcie_speed = 0, pcie_width = 0, pcie_arg;
int ret;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_speed = 16;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
pcie_speed = 8;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
pcie_speed = 5;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
pcie_speed = 2;
if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
pcie_width = 32;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
pcie_width = 16;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
pcie_width = 12;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
pcie_width = 8;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
pcie_width = 4;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
pcie_width = 2;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
pcie_width = 1;
pcie_arg = pcie_width | (pcie_speed << 8);
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, pcie_arg);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
return 0;
}
static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
......@@ -1570,6 +1611,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to initialize SMC table!",
return result);
result = vega20_override_pcie_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"[EnableDPMTasks] Failed to override pcie parameters!",
return result);
result = vega20_run_btc(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"[EnableDPMTasks] Failed to run btc!",
......
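The override packs the link speed into bits 8..15 and the width into bits 0..7 of the SMU message argument; a couple of worked values:

	pcie_arg = width | (speed << 8)
	Gen4 x16: (16 << 8) | 16 = 0x1010
	Gen3 x8 : ( 8 << 8) |  8 = 0x0808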
......@@ -639,13 +639,9 @@ int ast_dumb_create(struct drm_file *file,
static void ast_bo_unref(struct ast_bo **bo)
{
struct ttm_buffer_object *tbo;
if ((*bo) == NULL)
return;
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
ttm_bo_put(&((*bo)->bo));
*bo = NULL;
}
......
......@@ -33,7 +33,7 @@ int mgag200_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
int ret;
drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
......@@ -318,13 +318,9 @@ int mgag200_dumb_create(struct drm_file *file,
static void mgag200_bo_unref(struct mgag200_bo **bo)
{
struct ttm_buffer_object *tbo;
if ((*bo) == NULL)
return;
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
ttm_bo_put(&((*bo)->bo));
*bo = NULL;
}
......
......@@ -61,12 +61,14 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return -EINVAL;
prev = *pnvbo;
*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
if (prev) {
struct ttm_buffer_object *bo = &prev->bo;
ttm_bo_unref(&bo);
if (ref) {
ttm_bo_get(&ref->bo);
*pnvbo = nouveau_bo(&ref->bo);
} else {
*pnvbo = NULL;
}
if (prev)
ttm_bo_put(&prev->bo);
return 0;
}
......
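Worth noting: the rewritten helper takes the new reference before dropping the old one, so a self-assignment stays safe. A hypothetical but legal call (the pointer name is illustrative):

	nouveau_bo_ref(nvbo, &slot); /* fine even when slot == nvbo: get precedes put */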
......@@ -41,7 +41,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
struct device *dev = drm->dev->dev;
int ret;
......@@ -56,7 +55,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
/* reset filp so nouveau_bo_del_ttm() can test for it */
gem->filp = NULL;
ttm_bo_unref(&bo);
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
......
......@@ -679,15 +679,6 @@ void ttm_bo_put(struct ttm_buffer_object *bo)
}
EXPORT_SYMBOL(ttm_bo_put);
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
*p_bo = NULL;
ttm_bo_put(bo);
}
EXPORT_SYMBOL(ttm_bo_unref);
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
return cancel_delayed_work_sync(&bdev->wq);
......
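Because ttm_bo_put() takes the object rather than a pointer-to-pointer, it cannot clear the caller's reference the way ttm_bo_unref() did; the vmwgfx conversions below therefore add an explicit NULL store, e.g.:

	/* old: pointer cleared as a side effect */
	ttm_bo_unref(&man->cmd_space);

	/* new: clear it by hand */
	ttm_bo_put(man->cmd_space);
	man->cmd_space = NULL;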
......@@ -534,7 +534,6 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *vmw_user_bo;
struct ttm_base_object *base = *p_base;
struct ttm_buffer_object *bo;
*p_base = NULL;
......@@ -543,8 +542,7 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
bo = &vmw_user_bo->vbo.base;
ttm_bo_unref(&bo);
ttm_bo_put(&vmw_user_bo->vbo.base);
}
......@@ -597,7 +595,6 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *user_bo;
struct ttm_buffer_object *tmp;
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
......@@ -614,7 +611,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
tmp = ttm_bo_reference(&user_bo->vbo.base);
ttm_bo_get(&user_bo->vbo.base);
ret = ttm_prime_object_init(tfile,
size,
&user_bo->prime,
......@@ -623,7 +620,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
&vmw_user_bo_release,
&vmw_user_bo_ref_obj_release);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
ttm_bo_put(&user_bo->vbo.base);
goto out_no_base_object;
}
......@@ -911,7 +908,7 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
ttm_bo_get(&vmw_user_bo->vbo.base);
if (p_base)
*p_base = base;
else
......
......@@ -765,7 +765,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
if (info->done)
return true;
memset(info->node, 0, sizeof(*info->node));
spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
......@@ -1276,8 +1276,10 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
return 0;
out_no_map:
if (man->using_mob)
ttm_bo_unref(&man->cmd_space);
if (man->using_mob) {
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
}
return ret;
}
......@@ -1380,7 +1382,8 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (man->using_mob) {
(void) ttm_bo_kunmap(&man->map_obj);
ttm_bo_unref(&man->cmd_space);
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
} else {
dma_free_coherent(&man->dev_priv->dev->pdev->dev,
man->size, man->map, man->handle);
......
......@@ -1337,18 +1337,15 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
*buf = NULL;
if (tmp_buf != NULL) {
struct ttm_buffer_object *bo = &tmp_buf->base;
ttm_bo_unref(&bo);
ttm_bo_put(&tmp_buf->base);
}
}
static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
if (ttm_bo_reference(&buf->base))
return buf;
return NULL;
ttm_bo_get(&buf->base);
return buf;
}
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
......
......@@ -300,7 +300,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
&batch->otables[i]);
}
ttm_bo_unref(&batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
out_no_bo:
return ret;
}
......@@ -365,7 +366,8 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
ttm_bo_unref(&batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
}
/*
......@@ -463,7 +465,8 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
out_unreserve:
ttm_bo_unreserve(mob->pt_bo);
ttm_bo_unref(&mob->pt_bo);
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
return ret;
}
......@@ -580,8 +583,10 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
*/
void vmw_mob_destroy(struct vmw_mob *mob)
{
if (mob->pt_bo)
ttm_bo_unref(&mob->pt_bo);
if (mob->pt_bo) {
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
kfree(mob);
}
......@@ -698,8 +703,10 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
if (pt_set_up)
ttm_bo_unref(&mob->pt_bo);
if (pt_set_up) {
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
return -ENOMEM;
}
......@@ -461,7 +461,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
ttm_bo_get(&res->backup->base);
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
......@@ -484,7 +485,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
ttm_bo_put(val_buf->bo);
val_buf->bo = NULL;
if (backup_dirty)
vmw_bo_unreference(&res->backup);
......@@ -544,7 +546,8 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
ttm_bo_unref(&val_buf->bo);
ttm_bo_put(val_buf->bo);
val_buf->bo = NULL;
}
/**
......
......@@ -628,8 +628,10 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_bo_node *entry;
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head)
ttm_bo_unref(&entry->base.bo);
list_for_each_entry(entry, &ctx->bo_list, base.head) {
ttm_bo_put(entry->base.bo);
entry->base.bo = NULL;
}
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
list_for_each_entry(val, &ctx->resource_list, head)
......
......@@ -295,23 +295,6 @@ static inline void ttm_bo_get(struct ttm_buffer_object *bo)
kref_get(&bo->kref);
}
/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
*
* Returns a refcounted pointer to a buffer object.
*
* This function is deprecated. Use @ttm_bo_get instead.
*/
static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
ttm_bo_get(bo);
return bo;
}
/**
* ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
* its refcount has already reached zero.
......@@ -386,17 +369,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
void ttm_bo_put(struct ttm_buffer_object *bo);
/**
* ttm_bo_unref
*
* @bo: The buffer object.
*
* Unreference and clear a pointer to a buffer object.
*
* This function is deprecated. Use @ttm_bo_put instead.
*/
void ttm_bo_unref(struct ttm_buffer_object **bo);
/**
* ttm_bo_add_to_lru
*
......