Commit a51fe84d authored by Benjamin Gaignard

drm: sti: simplify gdp code

Store the physical address at node creation time
to avoid use of virt_to_dma and dma_to_virt everywhere
Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
parent 2f7d0e82
...@@ -73,7 +73,9 @@ struct sti_gdp_node { ...@@ -73,7 +73,9 @@ struct sti_gdp_node {
struct sti_gdp_node_list { struct sti_gdp_node_list {
struct sti_gdp_node *top_field; struct sti_gdp_node *top_field;
dma_addr_t top_field_paddr;
struct sti_gdp_node *btm_field; struct sti_gdp_node *btm_field;
dma_addr_t btm_field_paddr;
}; };
/** /**
...@@ -168,7 +170,6 @@ static int sti_gdp_get_alpharange(int format) ...@@ -168,7 +170,6 @@ static int sti_gdp_get_alpharange(int format)
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
{ {
int hw_nvn; int hw_nvn;
void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer); struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i; unsigned int i;
...@@ -176,11 +177,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) ...@@ -176,11 +177,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
if (!hw_nvn) if (!hw_nvn)
goto end; goto end;
virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
for (i = 0; i < GDP_NODE_NB_BANK; i++) for (i = 0; i < GDP_NODE_NB_BANK; i++)
if ((virt_nvn != gdp->node_list[i].btm_field) && if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
(virt_nvn != gdp->node_list[i].top_field)) (hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i]; return &gdp->node_list[i];
/* in hazardious cases restart with the first node */ /* in hazardious cases restart with the first node */
...@@ -204,7 +203,6 @@ static ...@@ -204,7 +203,6 @@ static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
{ {
int hw_nvn; int hw_nvn;
void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer); struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i; unsigned int i;
...@@ -212,11 +210,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) ...@@ -212,11 +210,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
if (!hw_nvn) if (!hw_nvn)
goto end; goto end;
virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
for (i = 0; i < GDP_NODE_NB_BANK; i++) for (i = 0; i < GDP_NODE_NB_BANK; i++)
if ((virt_nvn == gdp->node_list[i].btm_field) || if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
(virt_nvn == gdp->node_list[i].top_field)) (hw_nvn == gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i]; return &gdp->node_list[i];
end: end:
...@@ -292,8 +288,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare) ...@@ -292,8 +288,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
/* Same content and chained together */ /* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field)); memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field); top_field->gam_gdp_nvn = list->btm_field_paddr;
btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field); btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */ /* Interlaced mode */
if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
...@@ -349,8 +345,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer) ...@@ -349,8 +345,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer)
struct sti_gdp_node *updated_top_node = updated_list->top_field; struct sti_gdp_node *updated_top_node = updated_list->top_field;
struct sti_gdp_node *updated_btm_node = updated_list->btm_field; struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
struct sti_gdp *gdp = to_sti_gdp(layer); struct sti_gdp *gdp = to_sti_gdp(layer);
u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node); u32 dma_updated_top = updated_list->top_field_paddr;
u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node); u32 dma_updated_btm = updated_list->btm_field_paddr;
struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
...@@ -461,16 +457,16 @@ static void sti_gdp_init(struct sti_layer *layer) ...@@ -461,16 +457,16 @@ static void sti_gdp_init(struct sti_layer *layer)
{ {
struct sti_gdp *gdp = to_sti_gdp(layer); struct sti_gdp *gdp = to_sti_gdp(layer);
struct device_node *np = layer->dev->of_node; struct device_node *np = layer->dev->of_node;
dma_addr_t dma; dma_addr_t dma_addr;
void *base; void *base;
unsigned int i, size; unsigned int i, size;
/* Allocate all the nodes within a single memory page */ /* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) * size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
base = dma_alloc_writecombine(layer->dev, base = dma_alloc_writecombine(layer->dev,
size, &dma, GFP_KERNEL | GFP_DMA); size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) { if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n"); DRM_ERROR("Failed to allocate memory for GDP node\n");
return; return;
...@@ -478,21 +474,26 @@ static void sti_gdp_init(struct sti_layer *layer) ...@@ -478,21 +474,26 @@ static void sti_gdp_init(struct sti_layer *layer)
memset(base, 0, size); memset(base, 0, size);
for (i = 0; i < GDP_NODE_NB_BANK; i++) { for (i = 0; i < GDP_NODE_NB_BANK; i++) {
if (virt_to_dma(layer->dev, base) & 0xF) { if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n"); DRM_ERROR("Mem alignment failed\n");
return; return;
} }
gdp->node_list[i].top_field = base; gdp->node_list[i].top_field = base;
gdp->node_list[i].top_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base); DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node); base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
if (virt_to_dma(layer->dev, base) & 0xF) { if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n"); DRM_ERROR("Mem alignment failed\n");
return; return;
} }
gdp->node_list[i].btm_field = base; gdp->node_list[i].btm_field = base;
gdp->node_list[i].btm_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base); DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node); base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
} }
if (of_device_is_compatible(np, "st,stih407-compositor")) { if (of_device_is_compatible(np, "st,stih407-compositor")) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment