Commit 50fbe0cc authored by Srinivasan Shanmugam, committed by Alex Deucher

drm/amdgpu: Add -ENOMEM error handling when there is no memory

Return -ENOMEM when a dynamic memory allocation fails.

Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent fcb7a184
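The fix follows the usual kernel idiom for dynamic allocation failure: test the pointer returned by kmalloc()/kzalloc() and propagate -ENOMEM to the caller instead of only printing a warning. A minimal sketch of that idiom, separate from the patch itself (foo_prepare_backup() and its arguments are hypothetical, for illustration only):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Hypothetical helper, not part of the patch: allocate a backup
     * buffer and report the failure to the caller rather than warn only.
     */
    static int foo_prepare_backup(struct device *dev, size_t size, void **backup)
    {
            *backup = kmalloc(size, GFP_KERNEL);
            if (!*backup) {
                    dev_warn(dev, "no memory for backup buffer\n");
                    return -ENOMEM; /* caller can now unwind cleanly */
            }
            return 0;
    }

The hunks below apply exactly this pattern at several call sites that previously warned and carried on with a NULL pointer.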
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

@@ -110,9 +110,9 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
  * The bitmask of CUs to be disabled in the shader array determined by se and
  * sh is stored in mask[se * max_sh + sh].
  */
-void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
 {
-	unsigned se, sh, cu;
+	unsigned int se, sh, cu;
 	const char *p;
 
 	memset(mask, 0, sizeof(*mask) * max_se * max_sh);
@@ -124,6 +124,7 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
 	for (;;) {
 		char *next;
 		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+
 		if (ret < 3) {
 			DRM_ERROR("amdgpu: could not parse disable_cu\n");
 			return;
@@ -349,7 +350,7 @@ void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
 }
 
 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
-			unsigned hpd_size, int xcc_id)
+			unsigned int hpd_size, int xcc_id)
 {
 	int r;
 	u32 *hpd;
@@ -376,7 +377,7 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
 
 /* create MQD for each compute/gfx queue */
 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
-			   unsigned mqd_size, int xcc_id)
+			   unsigned int mqd_size, int xcc_id)
 {
 	int r, i, j;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
@@ -454,8 +455,10 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 
 			ring->mqd_size = mqd_size;
 			/* prepare MQD backup */
 			adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
-			if (!adev->gfx.mec.mqd_backup[j])
+			if (!adev->gfx.mec.mqd_backup[j]) {
 				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+				return -ENOMEM;
+			}
 		}
 	}
@@ -1286,11 +1289,11 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
 	return sysfs_emit(buf, "%s\n", supported_partition);
 }
 
-static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(current_compute_partition, 0644,
 		   amdgpu_gfx_get_current_compute_partition,
 		   amdgpu_gfx_set_compute_partition);
 
-static DEVICE_ATTR(available_compute_partition, S_IRUGO,
+static DEVICE_ATTR(available_compute_partition, 0444,
 		   amdgpu_gfx_get_available_compute_partition, NULL);
 
 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
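The DEVICE_ATTR() change in the last hunk above is an unrelated checkpatch-style cleanup folded into the patch: the symbolic mode macros are replaced by their exact octal values. For reference, the equivalence (values from include/uapi/linux/stat.h) can be checked at build time:

    #include <linux/build_bug.h>
    #include <linux/stat.h>

    /* The octal literals are exact equivalents of the macros they replace. */
    static_assert(S_IRUGO == 0444);                 /* world-readable */
    static_assert((S_IRUGO | S_IWUSR) == 0644);     /* plus owner-writable */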
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c

@@ -500,6 +500,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
 	if (!hive) {
 		dev_err(adev->dev, "XGMI: allocation failed\n");
+		ret = -ENOMEM;
 		hive = NULL;
 		goto pro_end;
 	}
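Unlike the other call sites, amdgpu_get_xgmi_hive() unwinds through an existing label rather than returning directly, so the fix records -ENOMEM in ret before jumping to the pro_end cleanup path. A sketch of that shape (every name except the pro_end label is hypothetical):

    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct foo_hive { int id; };            /* hypothetical stand-in type */
    static DEFINE_MUTEX(foo_hive_lock);     /* hypothetical lock */

    static struct foo_hive *foo_get_hive(void)
    {
            struct foo_hive *hive;
            int ret = 0;

            mutex_lock(&foo_hive_lock);
            hive = kzalloc(sizeof(*hive), GFP_KERNEL);
            if (!hive) {
                    ret = -ENOMEM;  /* record the error for the shared unwind path */
                    goto pro_end;
            }
            /* ... further setup may also set ret and jump to pro_end ... */
    pro_end:
            mutex_unlock(&foo_hive_lock);
            return ret ? NULL : hive;
    }

Routing every exit through one label keeps the lock release in a single place, which is why the patch sets ret instead of adding another return statement.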
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c

@@ -909,10 +909,12 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
 
 	/* prepare MQD backup */
 	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
-	if (!adev->mes.mqd_backup[pipe])
+	if (!adev->mes.mqd_backup[pipe]) {
 		dev_warn(adev->dev,
 			 "no memory to create MQD backup for ring %s\n",
 			 ring->name);
+		return -ENOMEM;
+	}
 
 	return 0;
 }
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c

@@ -1021,10 +1021,12 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
 
 	/* prepare MQD backup */
 	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
-	if (!adev->mes.mqd_backup[pipe])
+	if (!adev->mes.mqd_backup[pipe]) {
 		dev_warn(adev->dev,
 			 "no memory to create MQD backup for ring %s\n",
 			 ring->name);
+		return -ENOMEM;
+	}
 
 	return 0;
 }