/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
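/*
 * Illustrative only: a device-tagged message looks like
 *	dev_err(smu->adev->dev, "Failed to enable gfxoff!\n");
 * so that on multi-GPU systems the log line identifies which device it
 * belongs to, which a plain pr_err() cannot do.
 */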

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

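/*
 * smu_get_mclk()/smu_get_sclk() below reuse smu_get_dpm_freq_range() by
 * passing &clk_freq as either the min or the max pointer depending on
 * @low and leaving the other pointer NULL. The *100 scaling presumably
 * converts the MHz value reported by the SMU into the 10 kHz units used
 * by the legacy powerplay interfaces (an assumption based on the
 * powerplay convention, not spelled out here).
 */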
static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

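/*
 * VCN/JPEG power gating helpers: power_gate->vcn_gated/jpeg_gated track
 * the current gating state (1 = gated). If the block is already in the
 * requested state, the request is silently ignored; otherwise the ppt
 * callback is invoked and the cached state is updated on success.
 */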
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection because:
 * 1. It is either called by another IP block (gfx/sdma/vcn/uvd/vce),
 *    in which case the caller guarantees it is race-condition free.
 * 2. Or it is called on a user setting request of power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:	smu_context pointer
 * @clk:	enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:	smu_context pointer
 *
 * Restore the saved user power configurations include power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
					smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* not support power state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

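/*
 * The SW SMU path is used for all ASICs with MP1 IP v11.0.0 or newer,
 * with the exception of Vega20 (MP1 11.0.2), which is still driven
 * through the legacy powerplay code.
 */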
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}


static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action(for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		      (unsigned long *)allowed_feature_mask,
		      feature->feature_num);

	return ret;
}

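/*
 * Bind the ASIC specific ppt_funcs based on the MP1 IP version and apply
 * per-ASIC overdrive policy (e.g. OD is force-disabled on Arcturus and
 * force-enabled on Aldebaran).
 */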
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;

	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

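/*
 * The driver table BO below is shared by all SMU tables except the tool
 * (PMSTATUSLOG) table, so it is sized to the largest table and allocated
 * in VRAM with page alignment.
 */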
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC. Its location is passed with the
 * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

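/*
 * SW-side table setup: build the SMC table descriptions, the power/DPM
 * contexts, the VRAM backing BOs, the (optional) memory pool and dummy
 * read table, and finally the I2C buses. The inverse order is used in
 * smu_smc_table_sw_fini() below.
 */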
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

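/*
 * Main SMC bring-up sequence: program the driver/tool/memory-pool DRAM
 * addresses, upload the pptable, run BTC, enable the allowed DPM
 * features, report PCIe link parameters, then set up thermal ranges,
 * default DPM tables and the minimum deep-sleep DCEF clock.
 */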
static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		/* this is needed specifically */
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			break;
		default:
			break;
		}
		return ret;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
	 * pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	/*
	 * Set initial values (from vbios) in the dpm tables context, such as
	 * gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
	 * type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume(from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	/*
	 * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
	 * the workaround which always reset the asic in suspend.
	 * It's likely that workaround will be dropped in the future.
	 * Then the change here should be dropped together.
	 */
	bool use_baco = !smu->is_apu &&
		(((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     efforts.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO in. Driver involvement is unnecessary.
	 */
	if (use_baco) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

static void smu_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	kfree(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	smu_set_gfx_cgpg(smu, false);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	smu_set_gfx_cgpg(smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

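/*
 * Entering a UMD pstate (profiling) level saves the current DPM level
 * and turns off the GFX power features (GPO, ULV, deep sleep) that would
 * interfere with stable measurements; leaving it restores them in the
 * reverse order.
 */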
static int smu_enable_umd_pstate(void *handle,
		      enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_gpo_control(smu, false);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
					   long *param,
					   uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

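/*
 * Workload selection note: workload_mask holds one bit per active
 * profile, indexed by priority. fls() picks the highest-priority bit
 * that is set, and the matching entry of workload_setting[] becomes the
 * power profile pushed to the SMU.
 */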
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_bump_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);

}

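/*
 * smu_switch_power_profile() adds or removes the bit for @type in
 * workload_mask and re-evaluates the highest-priority remaining profile;
 * the new selection is only pushed to the SMU when DPM is in an
 * automatic (non-manual, non-deterministic) level.
 */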
static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, &workload, 0);

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	return smu_dpm_ctx->dpm_level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret)
		return ret;

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_init_display_count(smu, count);
}

static int smu_force_smuclk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	return ret;
}

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared
 * so that the SMU services which are no longer available get
 * gated. However, setting the mp1 state should still be allowed
 * even when dpm_enabled has been cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->set_mp1_state)
		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

	return ret;
}

static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_watermarks_table(smu, NULL);
}

static int smu_set_watermarks_for_clock_ranges(void *handle,
					       struct pp_smu_wm_range_sets *clock_ranges)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	return smu_set_watermarks_table(smu, clock_ranges);
}

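/*
 * Notify the SMU about the current power source (AC or DC), based on
 * adev->pm.ac_power. Skipped when the AC/DC transition is handled by
 * the firmware through a GPIO pin (dc_controlled_by_gpio).
 */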
int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
		       smu->adev->pm.ac_power ? "AC" : "DC");

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.late_fini = smu_late_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v13_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

static int smu_load_microcode(void *handle)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	/* This should be used for non PSP loading */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		return 0;

	if (smu->ppt_funcs->load_microcode) {
		ret = smu->ppt_funcs->load_microcode(smu);
		if (ret) {
			dev_err(adev->dev, "Load microcode failed\n");
			return ret;
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	return ret;
}

static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	return ret;
}

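/*
 * Set the fan speed in RPM. On success the value is remembered in
 * user_dpm_profile (unless a profile restore is in progress) and any
 * previously stored PWM based fan setting is cleared, since the two
 * cannot be applied at the same time.
 */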
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = speed;

		/* Override custom PWM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = 0;
	}

	return ret;
}

/**
 * smu_get_power_limit - Request one of the SMU Power Limits
 *
 * @handle: pointer to smu context
 * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which limit to return
 * @pp_power_type: &pp_power_type selecting the type of power limit
 * Return: 0 on success, <0 on error
 *
 */
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	enum smu_ppt_limit_level limit_level;
	uint32_t limit_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (pp_power_type) {
	case PP_PWR_TYPE_SUSTAINED:
		limit_type = SMU_DEFAULT_PPT_LIMIT;
		break;
	case PP_PWR_TYPE_FAST:
		limit_type = SMU_FAST_PPT_LIMIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		limit_level = SMU_PPT_LIMIT_CURRENT;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		limit_level = SMU_PPT_LIMIT_DEFAULT;
		break;
	case PP_PWR_LIMIT_MAX:
		limit_level = SMU_PPT_LIMIT_MAX;
		break;
	case PP_PWR_LIMIT_MIN:
	default:
		return -EOPNOTSUPP;
	}

	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
		if (smu->ppt_funcs->get_ppt_limit)
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			switch (adev->ip_versions[MP1_HWIP][0]) {
			case IP_VERSION(13, 0, 2):
			case IP_VERSION(11, 0, 7):
			case IP_VERSION(11, 0, 11):
			case IP_VERSION(11, 0, 12):
			case IP_VERSION(11, 0, 13):
				ret = smu_get_asic_power_limits(smu,
								&smu->current_power_limit,
								NULL,
								NULL);
				break;
			default:
				break;
			}
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*limit = smu->default_power_limit;
			break;
		case SMU_PPT_LIMIT_MAX:
			*limit = smu->max_power_limit;
			break;
		default:
			break;
		}
	}

	return ret;
}

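/*
 * The limit argument packs the PPT limit type into its upper 8 bits
 * (bits 31:24) and the actual power limit value into the lower 24 bits.
 * A value of 0 re-applies the current power limit.
 */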
static int smu_set_power_limit(void *handle, uint32_t limit)
{
	struct smu_context *smu = handle;
	uint32_t limit_type = limit >> 24;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	limit &= (1 << 24) - 1;
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		if (smu->ppt_funcs->set_power_limit)
			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		return -EINVAL;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
			smu->user_dpm_profile.power_limit = limit;
	}

	return ret;
}

static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	return ret;
}

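/*
 * Map a pp_clock_type to the corresponding smu_clk_type, returning
 * SMU_CLK_COUNT when the type is not recognized.
 */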
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	case OD_VDDGFX_OFFSET:
		clk_type = SMU_OD_VDDGFX_OFFSET; break;
	case OD_CCLK:
		clk_type = SMU_OD_CCLK; break;
	default:
		clk_type = SMU_CLK_COUNT; break;
	}

	return clk_type;
}

static int smu_print_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  char *buf)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	return smu_print_smuclk_levels(smu, clk_type, buf);
}

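/*
 * Buffer-offset variant of smu_print_ppclk_levels(); returns -ENOENT
 * when the ASIC ppt_funcs do not implement emit_clk_levels.
 */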
static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->emit_clk_levels)
		return -ENOENT;

	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
}

static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->od_edit_dpm_table) {
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
	}

	return ret;
}

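/*
 * Generic sensor read entry point. Sensors not handled by the ASIC
 * specific read_sensor callback are serviced here from cached pstate
 * and feature data; *size_arg is updated with the size of the value
 * written back.
 */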
static int smu_read_sensor(void *handle,
			   int sensor,
			   void *data,
			   int *size_arg)
{
	struct smu_context *smu = handle;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;
	uint32_t *size, size_val;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size_arg)
		return -EINVAL;

	size_val = *size_arg;
	size = &size_val;

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	/* assign uint32_t to int */
	*size_arg = size_val;

	return ret;
}

static int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}

static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->set_power_profile_mode)
		return -EOPNOTSUPP;

	return smu_bump_power_profile_mode(smu, param, param_size);
}


static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);

	return 0;
}

static int smu_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (value == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	if (ret)
		goto out;

	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.fan_mode = value;

		/* reset user dpm fan speed */
		if (value != AMD_FAN_CTRL_MANUAL) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
		}
	}

out:
	return ret;
}

static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);

	return ret;
}

static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = speed;

		/* Override custom RPM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = 0;
	}

	return ret;
}

static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	return ret;
}

static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_min_dcef_deep_sleep(smu, clk);
}

static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
		switch (type) {
		case amd_pp_sys_clock:
			clk_type = SMU_GFXCLK;
			break;
		case amd_pp_mem_clock:
			clk_type = SMU_MCLK;
			break;
		case amd_pp_dcef_clock:
			clk_type = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_type = SMU_DISPCLK;
			break;
		default:
			dev_err(smu->adev->dev, "Invalid clock type!\n");
			return -EINVAL;
		}

		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	}

	return ret;
}

static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	return ret;
}


static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{
	struct smu_context *smu = handle;
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	return ret;
}

static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

static int smu_get_baco_capability(void *handle, bool *cap)
{
	struct smu_context *smu = handle;

	*cap = false;

	if (!smu->pm_enabled)
		return 0;

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		*cap = smu->ppt_funcs->baco_is_support(smu);

	return 0;
}

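/*
 * Enter (state == 1) or exit (state == 0) BACO. Any other state value
 * is rejected with -EINVAL.
 */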
static int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);
	} else if (state == 1) {
		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
				state ? "enter" : "exit");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}

static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
		smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

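/*
 * Report the CPU address and size of the SMU memory pool backing the
 * private buffer, or NULL/0 when no pool BO has been allocated.
 */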
static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

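/*
 * amd_pm_funcs table exposing the swsmu entry points to sysfs, amdgpu
 * and display code, as grouped by the comments below.
 */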
static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode    = smu_set_fan_control_mode,
	.get_fan_control_mode    = smu_get_fan_control_mode,
	.set_fan_speed_pwm   = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm   = smu_get_fan_speed_pwm,
	.force_clock_level       = smu_force_ppclk_levels,
	.print_clock_levels      = smu_print_ppclk_levels,
	.emit_clock_levels       = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor             = smu_read_sensor,
	.get_performance_level   = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
	.get_pp_num_states       = smu_get_power_num_states,
	.get_pp_table            = smu_sys_get_pp_table,
	.set_pp_table            = smu_sys_set_pp_table,
	.switch_power_profile    = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks          = smu_handle_dpm_task,
	.load_firmware           = smu_load_microcode,
	.set_powergating_by_smu  = smu_dpm_set_power_gate,
	.set_power_limit         = smu_set_power_limit,
	.get_power_limit         = smu_get_power_limit,
	.get_power_profile_mode  = smu_get_power_profile_mode,
	.set_power_profile_mode  = smu_set_power_profile_mode,
	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
	.set_mp1_state           = smu_set_mp1_state,
	.gfx_state_change_set    = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk                         = smu_get_sclk,
	.get_mclk                         = smu_get_mclk,
	.display_configuration_change     = smu_display_configuration_change,
	.get_clock_by_type_with_latency   = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request    = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
	.set_active_display_count         = smu_set_display_count,
	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability         = smu_get_baco_capability,
	.set_asic_baco_state              = smu_baco_set_state,
	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2                = smu_mode2_reset,
	.set_df_cstate                    = smu_set_df_cstate,
	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
	.get_dpm_clock_table              = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};

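/*
 * Wait for the given SMU event, delegating to the ASIC wait_for_event
 * handler when one is provided; otherwise -EINVAL is returned.
 */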
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{

	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the allocated buffer is of the correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock smu mutex as we access STB directly through MMIO
	 * and not going through SMU messaging route (for now at least).
	 * For register access, rely on the implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;


	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also
 * open and release, because .read takes up to PAGE_SIZE of
 * data each time and so is invoked multiple times.
 * We allocate the STB buffer in .open and release it
 * in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

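/*
 * Create the amdgpu_smu_stb_dump debugfs file, sized to the STB buffer,
 * when debugfs is enabled and the ASIC exposes an STB buffer.
 */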
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
			    S_IRUSR,
			    adev_to_drm(adev)->primary->debugfs_root,
			    adev,
			    &smu_stb_debugfs_fops,
			    smu->stb_context.stb_buf_size);
#endif
}

int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}