Commit ddcda24e authored by Thomas Hellstrom

drm/vmwgfx: Hook up guest-backed queries

Perform a translation of legacy query commands should they occur
in the command stream.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Zack Rusin <zackr@vmware.com>
parent 96c5f0df
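
The translation pattern is the same for BEGIN, END and WAIT: when the device has MOB support (dev_priv->has_mob), the validator rewrites the legacy command in place as its guest-backed twin and re-dispatches it to the corresponding GB validator. A condensed sketch of that pattern, using the identifiers from the vmw_cmd_end_query() hunk below (a reading aid, not a drop-in replacement for the diff):

        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdEndGBQuery q;
                } gb_cmd;

                /* The in-place rewrite is only safe because both
                 * commands occupy the same number of bytes. */
                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                /* The legacy guest pointer (handle + offset) becomes
                 * a MOB id (still the user-space handle at this point)
                 * plus offset. */
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                /* Patch the command stream in place, then validate it
                 * as a guest-backed command. */
                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }
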
@@ -167,6 +167,7 @@ struct vmw_fifo_state {
 };
 
 struct vmw_relocation {
+	SVGAMobId *mob_loc;
 	SVGAGuestPtr *location;
 	uint32_t index;
 };
...
@@ -679,6 +679,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 	}
 }
+
+/**
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations().
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	uint32_t handle = *id;
+	struct vmw_relocation *reloc;
+	int ret;
+
+	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use MOB buffer.\n");
+		return -EINVAL;
+	}
+	bo = &vmw_bo->base;
+
+	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+		DRM_ERROR("Max number of relocations per submission"
+			  " exceeded\n");
+		ret = -EINVAL;
+		goto out_no_reloc;
+	}
+
+	reloc = &sw_context->relocs[sw_context->cur_reloc++];
+	reloc->mob_loc = id;
+	reloc->location = NULL;
+
+	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	*vmw_bo_p = NULL;
+	return ret;
+}
+
 /**
  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
  * handle to a valid SVGAGuestPtr
@@ -739,6 +799,30 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	return ret;
 }
+
+/**
+ * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_begin_gb_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBeginGBQuery q;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+			   header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->q.cid,
+				 NULL);
+}
+
 /**
  * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
  *
@@ -758,11 +842,63 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
 	cmd = container_of(header, struct vmw_begin_query_cmd,
 			   header);
+
+	if (unlikely(dev_priv->has_mob)) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdBeginGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+	}
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				 user_context_converter, &cmd->q.cid,
 				 NULL);
 }
+
+/**
+ * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return ret;
+}
+
 /**
  * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
  *
@@ -782,6 +918,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdEndGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -798,7 +953,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	return ret;
 }
 
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+/**
  * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -817,6 +1005,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdWaitForGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1093,6 +1300,9 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1182,6 +1392,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 		case VMW_PL_GMR:
 			reloc->location->gmrId = bo->mem.start;
 			break;
+		case VMW_PL_MOB:
+			*reloc->mob_loc = bo->mem.start;
+			break;
 		default:
 			BUG();
 		}
...
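
The in-place memcpy() rewrite above works only because each legacy command payload and its guest-backed twin occupy the same number of bytes, which the BUG_ON() checks enforce at run time. The same invariant can be sketched at compile time; the layouts below are inferred from the field accesses in the diff, not copied from the SVGA device headers, so treat them as illustrative stand-ins:

        #include <stdint.h>

        struct svga_guest_ptr {         /* stand-in for SVGAGuestPtr */
                uint32_t gmrId;
                uint32_t offset;
        };

        struct legacy_end_query {       /* stand-in for SVGA3dCmdEndQuery */
                uint32_t cid;
                uint32_t type;
                struct svga_guest_ptr guestResult;
        };

        struct gb_end_query {           /* stand-in for SVGA3dCmdEndGBQuery */
                uint32_t cid;
                uint32_t type;
                uint32_t mobid;
                uint32_t offset;
        };

        /* The in-place rewrite in vmw_cmd_end_query() depends on this: */
        _Static_assert(sizeof(struct legacy_end_query) ==
                       sizeof(struct gb_end_query),
                       "legacy and GB query commands must be the same size");
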
@@ -511,24 +511,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 }
 
 /**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
  *
  * @dev_priv: The device private structure.
  * @cid: The hardware context id used for the query.
  *
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
  */
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-			      uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+					    uint32_t cid)
 {
 	/*
 	 * A query wait without a preceding query end will
...
@@ -566,3 +558,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 	return 0;
 }
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+					uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of fifo space for dummy query.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.offset = 0;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * the appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A query result structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object, and that buffer object
+ * must be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	if (dev_priv->has_mob)
+		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
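
As the comments above note, a query wait without a preceding query end finishes all outstanding queries for the context, so the dummy query doubles as a barrier: once it signals, every query submitted before it has completed. A hypothetical caller, with names invented purely for this sketch (the real call sites are elsewhere in the driver), might rely on that like this:

        /* Hypothetical sketch; switch_query_buffers() is not a real
         * driver function. */
        static int switch_query_buffers(struct vmw_private *dev_priv,
                                        uint32_t cid)
        {
                /* Barrier: once this dummy query signals, all queries
                 * submitted earlier for this context have finished. */
                int ret = vmw_fifo_emit_dummy_query(dev_priv, cid);

                if (ret != 0)
                        return ret;     /* -ENOMEM: no fifo space */

                /* The previous query buffer can now be released or
                 * swapped without racing pending hardware writes. */
                return 0;
        }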