Commit c113ea1c authored by Alex Deucher

drm/amdgpu: rework sdma structures

Rework the sdma structures in the driver to
consolidate all of the sdma info into a single
structure and allow for asics that may have
different numbers of sdma instances.
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 756e6880
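
Before the hunks, a minimal, self-contained sketch of the consolidated layout this commit moves to may help orient readers. The amdgpu_ring and amdgpu_irq_src types below are stand-in stubs for illustration only; the real definitions live in the driver headers shown in the diff.

/* Sketch of the consolidated SDMA bookkeeping (illustrative stubs only). */
#include <stdio.h>
#include <stdint.h>

#define AMDGPU_MAX_SDMA_INSTANCES 2

struct amdgpu_ring    { int ready; };   /* stub for illustration */
struct amdgpu_irq_src { int enabled; }; /* stub for illustration */

/* Per-engine state: what used to be "struct amdgpu_sdma" per instance. */
struct amdgpu_sdma_instance {
	uint32_t fw_version;
	uint32_t feature_version;
	struct amdgpu_ring ring;
};

/* Single container: all SDMA state in one place; ASICs with fewer
 * engines simply set num_instances accordingly. */
struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_irq_src trap_irq;
	struct amdgpu_irq_src illegal_inst_irq;
	int num_instances;
};

int main(void)
{
	struct amdgpu_sdma sdma = { .num_instances = 2 };

	/* Callers now loop over num_instances instead of hardcoding two
	 * engines or indexing adev->sdma[i] directly. */
	for (int i = 0; i < sdma.num_instances; i++)
		printf("SDMA%d fw 0x%08x, ring ready: %d\n",
		       i, sdma.instance[i].fw_version, sdma.instance[i].ring.ready);
	return 0;
}

The hunks below follow this pattern: accesses like adev->sdma[i] become adev->sdma.instance[i], and hardcoded checks against 2 become bounds checks against adev->sdma.num_instances.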
@@ -1708,7 +1708,7 @@ struct amdgpu_vce {
 /*
  * SDMA
  */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
 	/* SDMA firmware */
 	const struct firmware	*fw;
 	uint32_t		fw_version;
@@ -1718,6 +1718,13 @@ struct amdgpu_sdma {
 	bool			burst_nop;
 };
 
+struct amdgpu_sdma {
+	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct amdgpu_irq_src	trap_irq;
+	struct amdgpu_irq_src	illegal_inst_irq;
+	int			num_instances;
+};
+
 /*
  * Firmware
  */
@@ -2064,9 +2071,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
-	struct amdgpu_irq_src		sdma_trap_irq;
-	struct amdgpu_irq_src		sdma_illegal_inst_irq;
+	struct amdgpu_sdma		sdma;
 
 	/* uvd */
 	bool				has_uvd;
@@ -2203,17 +2208,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int i;
 
-	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-		if (&adev->sdma[i].ring == ring)
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		if (&adev->sdma.instance[i].ring == ring)
 			break;
 
 	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma[i];
+		return &adev->sdma.instance[i];
 	else
 		return NULL;
 }
...
@@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 	case KGD_ENGINE_SDMA1:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[0].fw->data;
+						adev->sdma.instance[0].fw->data;
 		break;
 
 	case KGD_ENGINE_SDMA2:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[1].fw->data;
+						adev->sdma.instance[1].fw->data;
 		break;
 
 	default:
...
@@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 	case KGD_ENGINE_SDMA1:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[0].fw->data;
+						adev->sdma.instance[0].fw->data;
 		break;
 
 	case KGD_ENGINE_SDMA2:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[1].fw->data;
+						adev->sdma.instance[1].fw->data;
 		break;
 
 	default:
...
@@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	case AMDGPU_HW_IP_DMA:
-		if (ring < 2) {
-			*out_ring = &adev->sdma[ring].ring;
+		if (ring < adev->sdma.num_instances) {
+			*out_ring = &adev->sdma.instance[ring].ring;
 		} else {
-			DRM_ERROR("only two SDMA rings are supported\n");
+			DRM_ERROR("only %d SDMA rings are supported\n",
+				  adev->sdma.num_instances);
 			return -EINVAL;
 		}
 		break;
...
@@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
-		ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
-		ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
 		ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 		ib_size_alignment = 1;
 		break;
@@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			fw_info.feature = 0;
 			break;
 		case AMDGPU_INFO_FW_SDMA:
-			if (info->query_fw.index >= 2)
+			if (info->query_fw.index >= adev->sdma.num_instances)
 				return -EINVAL;
-			fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-			fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
+			fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
+			fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
 			break;
 		default:
 			return -EINVAL;
...
@@ -540,8 +540,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
 static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
 static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
 static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
+static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
+static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
 static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
 static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
 static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
...