Commit fb95992a authored by Eric Anholt's avatar Eric Anholt

drm/vc4: Demote user-accessible DRM_ERROR paths to DRM_DEBUG.

Userspace shouldn't be able to spam dmesg by passing bad arguments.
This has particularly become an issue since we started using a bad
argument to set_tiling to detect if set_tiling was supported.
Signed-off-by: Eric Anholt <eric@anholt.net>
Fixes: 83753117 ("drm/vc4: Add get/set tiling ioctls.")
Link: https://patchwork.freedesktop.org/patch/msgid/20170725162733.28007-1-eric@anholt.net
Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
parent 1d5494e9
...@@ -482,7 +482,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) ...@@ -482,7 +482,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
struct vc4_bo *bo = to_vc4_bo(obj); struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader) { if (bo->validated_shader) {
DRM_ERROR("Attempting to export shader BO\n"); DRM_DEBUG("Attempting to export shader BO\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
...@@ -503,7 +503,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -503,7 +503,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
bo = to_vc4_bo(gem_obj); bo = to_vc4_bo(gem_obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -528,7 +528,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) ...@@ -528,7 +528,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct vc4_bo *bo = to_vc4_bo(obj); struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -540,7 +540,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj) ...@@ -540,7 +540,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj)
struct vc4_bo *bo = to_vc4_bo(obj); struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader) { if (bo->validated_shader) {
DRM_ERROR("mmaping of shader BOs not allowed.\n"); DRM_DEBUG("mmaping of shader BOs not allowed.\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
...@@ -594,7 +594,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, ...@@ -594,7 +594,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle); gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) { if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL; return -EINVAL;
} }
...@@ -698,7 +698,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, ...@@ -698,7 +698,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle); gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) { if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT; return -ENOENT;
} }
bo = to_vc4_bo(gem_obj); bo = to_vc4_bo(gem_obj);
...@@ -729,7 +729,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, ...@@ -729,7 +729,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle); gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) { if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT; return -ENOENT;
} }
bo = to_vc4_bo(gem_obj); bo = to_vc4_bo(gem_obj);
......
...@@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev, ...@@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
/* See comment on bo_index for why we have to check /* See comment on bo_index for why we have to check
* this. * this.
*/ */
DRM_ERROR("Rendering requires BOs to validate\n"); DRM_DEBUG("Rendering requires BOs to validate\n");
return -EINVAL; return -EINVAL;
} }
...@@ -690,7 +690,7 @@ vc4_cl_lookup_bos(struct drm_device *dev, ...@@ -690,7 +690,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
struct drm_gem_object *bo = idr_find(&file_priv->object_idr, struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
handles[i]); handles[i]);
if (!bo) { if (!bo) {
DRM_ERROR("Failed to look up GEM BO %d: %d\n", DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
i, handles[i]); i, handles[i]);
ret = -EINVAL; ret = -EINVAL;
spin_unlock(&file_priv->table_lock); spin_unlock(&file_priv->table_lock);
...@@ -728,7 +728,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) ...@@ -728,7 +728,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
args->shader_rec_count >= (UINT_MAX / args->shader_rec_count >= (UINT_MAX /
sizeof(struct vc4_shader_state)) || sizeof(struct vc4_shader_state)) ||
temp_size < exec_size) { temp_size < exec_size) {
DRM_ERROR("overflow in exec arguments\n"); DRM_DEBUG("overflow in exec arguments\n");
ret = -EINVAL; ret = -EINVAL;
goto fail; goto fail;
} }
...@@ -973,7 +973,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, ...@@ -973,7 +973,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle); gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) { if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL; return -EINVAL;
} }
bo = to_vc4_bo(gem_obj); bo = to_vc4_bo(gem_obj);
...@@ -1008,7 +1008,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, ...@@ -1008,7 +1008,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
int ret = 0; int ret = 0;
if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
return -EINVAL; return -EINVAL;
} }
......
...@@ -169,7 +169,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, ...@@ -169,7 +169,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
gem_obj = drm_gem_object_lookup(file_priv, gem_obj = drm_gem_object_lookup(file_priv,
mode_cmd->handles[0]); mode_cmd->handles[0]);
if (!gem_obj) { if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", DRM_DEBUG("Failed to look up GEM BO %d\n",
mode_cmd->handles[0]); mode_cmd->handles[0]);
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
} }
......
...@@ -378,14 +378,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec, ...@@ -378,14 +378,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32); u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
if (surf->offset > obj->base.size) { if (surf->offset > obj->base.size) {
DRM_ERROR("surface offset %d > BO size %zd\n", DRM_DEBUG("surface offset %d > BO size %zd\n",
surf->offset, obj->base.size); surf->offset, obj->base.size);
return -EINVAL; return -EINVAL;
} }
if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE < if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
render_tiles_stride * args->max_y_tile + args->max_x_tile) { render_tiles_stride * args->max_y_tile + args->max_x_tile) {
DRM_ERROR("MSAA tile %d, %d out of bounds " DRM_DEBUG("MSAA tile %d, %d out of bounds "
"(bo size %zd, offset %d).\n", "(bo size %zd, offset %d).\n",
args->max_x_tile, args->max_y_tile, args->max_x_tile, args->max_y_tile,
obj->base.size, obj->base.size,
...@@ -401,7 +401,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, ...@@ -401,7 +401,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
struct drm_vc4_submit_rcl_surface *surf) struct drm_vc4_submit_rcl_surface *surf)
{ {
if (surf->flags != 0 || surf->bits != 0) { if (surf->flags != 0 || surf->bits != 0) {
DRM_ERROR("MSAA surface had nonzero flags/bits\n"); DRM_DEBUG("MSAA surface had nonzero flags/bits\n");
return -EINVAL; return -EINVAL;
} }
...@@ -415,7 +415,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, ...@@ -415,7 +415,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
if (surf->offset & 0xf) { if (surf->offset & 0xf) {
DRM_ERROR("MSAA write must be 16b aligned.\n"); DRM_DEBUG("MSAA write must be 16b aligned.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -437,7 +437,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, ...@@ -437,7 +437,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
int ret; int ret;
if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
DRM_ERROR("Extra flags set\n"); DRM_DEBUG("Extra flags set\n");
return -EINVAL; return -EINVAL;
} }
...@@ -453,12 +453,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, ...@@ -453,12 +453,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
if (surf == &exec->args->zs_write) { if (surf == &exec->args->zs_write) {
DRM_ERROR("general zs write may not be a full-res.\n"); DRM_DEBUG("general zs write may not be a full-res.\n");
return -EINVAL; return -EINVAL;
} }
if (surf->bits != 0) { if (surf->bits != 0) {
DRM_ERROR("load/store general bits set with " DRM_DEBUG("load/store general bits set with "
"full res load/store.\n"); "full res load/store.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -473,19 +473,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, ...@@ -473,19 +473,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK | if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK | VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) { VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
DRM_ERROR("Unknown bits in load/store: 0x%04x\n", DRM_DEBUG("Unknown bits in load/store: 0x%04x\n",
surf->bits); surf->bits);
return -EINVAL; return -EINVAL;
} }
if (tiling > VC4_TILING_FORMAT_LT) { if (tiling > VC4_TILING_FORMAT_LT) {
DRM_ERROR("Bad tiling format\n"); DRM_DEBUG("Bad tiling format\n");
return -EINVAL; return -EINVAL;
} }
if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) { if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
if (format != 0) { if (format != 0) {
DRM_ERROR("No color format should be set for ZS\n"); DRM_DEBUG("No color format should be set for ZS\n");
return -EINVAL; return -EINVAL;
} }
cpp = 4; cpp = 4;
...@@ -499,16 +499,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, ...@@ -499,16 +499,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
cpp = 4; cpp = 4;
break; break;
default: default:
DRM_ERROR("Bad tile buffer format\n"); DRM_DEBUG("Bad tile buffer format\n");
return -EINVAL; return -EINVAL;
} }
} else { } else {
DRM_ERROR("Bad load/store buffer %d.\n", buffer); DRM_DEBUG("Bad load/store buffer %d.\n", buffer);
return -EINVAL; return -EINVAL;
} }
if (surf->offset & 0xf) { if (surf->offset & 0xf) {
DRM_ERROR("load/store buffer must be 16b aligned.\n"); DRM_DEBUG("load/store buffer must be 16b aligned.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -533,7 +533,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, ...@@ -533,7 +533,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
int cpp; int cpp;
if (surf->flags != 0) { if (surf->flags != 0) {
DRM_ERROR("No flags supported on render config.\n"); DRM_DEBUG("No flags supported on render config.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -541,7 +541,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, ...@@ -541,7 +541,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
VC4_RENDER_CONFIG_FORMAT_MASK | VC4_RENDER_CONFIG_FORMAT_MASK |
VC4_RENDER_CONFIG_MS_MODE_4X | VC4_RENDER_CONFIG_MS_MODE_4X |
VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) { VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
DRM_ERROR("Unknown bits in render config: 0x%04x\n", DRM_DEBUG("Unknown bits in render config: 0x%04x\n",
surf->bits); surf->bits);
return -EINVAL; return -EINVAL;
} }
...@@ -556,7 +556,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, ...@@ -556,7 +556,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
if (tiling > VC4_TILING_FORMAT_LT) { if (tiling > VC4_TILING_FORMAT_LT) {
DRM_ERROR("Bad tiling format\n"); DRM_DEBUG("Bad tiling format\n");
return -EINVAL; return -EINVAL;
} }
...@@ -569,7 +569,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, ...@@ -569,7 +569,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
cpp = 4; cpp = 4;
break; break;
default: default:
DRM_ERROR("Bad tile buffer format\n"); DRM_DEBUG("Bad tile buffer format\n");
return -EINVAL; return -EINVAL;
} }
...@@ -590,7 +590,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) ...@@ -590,7 +590,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (args->min_x_tile > args->max_x_tile || if (args->min_x_tile > args->max_x_tile ||
args->min_y_tile > args->max_y_tile) { args->min_y_tile > args->max_y_tile) {
DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n", DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
args->min_x_tile, args->min_y_tile, args->min_x_tile, args->min_y_tile,
args->max_x_tile, args->max_y_tile); args->max_x_tile, args->max_y_tile);
return -EINVAL; return -EINVAL;
...@@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) ...@@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (has_bin && if (has_bin &&
(args->max_x_tile > exec->bin_tiles_x || (args->max_x_tile > exec->bin_tiles_x ||
args->max_y_tile > exec->bin_tiles_y)) { args->max_y_tile > exec->bin_tiles_y)) {
DRM_ERROR("Render tiles (%d,%d) outside of bin config " DRM_DEBUG("Render tiles (%d,%d) outside of bin config "
"(%d,%d)\n", "(%d,%d)\n",
args->max_x_tile, args->max_y_tile, args->max_x_tile, args->max_y_tile,
exec->bin_tiles_x, exec->bin_tiles_y); exec->bin_tiles_x, exec->bin_tiles_y);
...@@ -642,7 +642,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) ...@@ -642,7 +642,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
*/ */
if (!setup.color_write && !setup.zs_write && if (!setup.color_write && !setup.zs_write &&
!setup.msaa_color_write && !setup.msaa_zs_write) { !setup.msaa_color_write && !setup.msaa_zs_write) {
DRM_ERROR("RCL requires color or Z/S write\n"); DRM_DEBUG("RCL requires color or Z/S write\n");
return -EINVAL; return -EINVAL;
} }
......
This diff is collapsed.
...@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, ...@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
uint32_t clamp_reg, clamp_offset; uint32_t clamp_reg, clamp_offset;
if (sig == QPU_SIG_SMALL_IMM) { if (sig == QPU_SIG_SMALL_IMM) {
DRM_ERROR("direct TMU read used small immediate\n"); DRM_DEBUG("direct TMU read used small immediate\n");
return false; return false;
} }
...@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, ...@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/ */
if (is_mul || if (is_mul ||
QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
DRM_ERROR("direct TMU load wasn't an add\n"); DRM_DEBUG("direct TMU load wasn't an add\n");
return false; return false;
} }
...@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, ...@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/ */
clamp_reg = raddr_add_a_to_live_reg_index(inst); clamp_reg = raddr_add_a_to_live_reg_index(inst);
if (clamp_reg == ~0) { if (clamp_reg == ~0) {
DRM_ERROR("direct TMU load wasn't clamped\n"); DRM_DEBUG("direct TMU load wasn't clamped\n");
return false; return false;
} }
clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg]; clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
if (clamp_offset == ~0) { if (clamp_offset == ~0) {
DRM_ERROR("direct TMU load wasn't clamped\n"); DRM_DEBUG("direct TMU load wasn't clamped\n");
return false; return false;
} }
...@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, ...@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
DRM_ERROR("direct TMU load didn't add to a uniform\n"); DRM_DEBUG("direct TMU load didn't add to a uniform\n");
return false; return false;
} }
...@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, ...@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
} else { } else {
if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM && if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
raddr_b == QPU_R_UNIF)) { raddr_b == QPU_R_UNIF)) {
DRM_ERROR("uniform read in the same instruction as " DRM_DEBUG("uniform read in the same instruction as "
"texture setup.\n"); "texture setup.\n");
return false; return false;
} }
} }
if (validation_state->tmu_write_count[tmu] >= 4) { if (validation_state->tmu_write_count[tmu] >= 4) {
DRM_ERROR("TMU%d got too many parameters before dispatch\n", DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
tmu); tmu);
return false; return false;
} }
...@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, ...@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/ */
if (!is_direct) { if (!is_direct) {
if (validation_state->needs_uniform_address_update) { if (validation_state->needs_uniform_address_update) {
DRM_ERROR("Texturing with undefined uniform address\n"); DRM_DEBUG("Texturing with undefined uniform address\n");
return false; return false;
} }
...@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade ...@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
case QPU_SIG_LOAD_TMU1: case QPU_SIG_LOAD_TMU1:
break; break;
default: default:
DRM_ERROR("uniforms address change must be " DRM_DEBUG("uniforms address change must be "
"normal math\n"); "normal math\n");
return false; return false;
} }
if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
DRM_ERROR("Uniform address reset must be an ADD.\n"); DRM_DEBUG("Uniform address reset must be an ADD.\n");
return false; return false;
} }
if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) { if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
DRM_ERROR("Uniform address reset must be unconditional.\n"); DRM_DEBUG("Uniform address reset must be unconditional.\n");
return false; return false;
} }
if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP && if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
!(inst & QPU_PM)) { !(inst & QPU_PM)) {
DRM_ERROR("No packing allowed on uniforms reset\n"); DRM_DEBUG("No packing allowed on uniforms reset\n");
return false; return false;
} }
if (add_lri == -1) { if (add_lri == -1) {
DRM_ERROR("First argument of uniform address write must be " DRM_DEBUG("First argument of uniform address write must be "
"an immediate value.\n"); "an immediate value.\n");
return false; return false;
} }
if (validation_state->live_immediates[add_lri] != expected_offset) { if (validation_state->live_immediates[add_lri] != expected_offset) {
DRM_ERROR("Resetting uniforms with offset %db instead of %db\n", DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
validation_state->live_immediates[add_lri], validation_state->live_immediates[add_lri],
expected_offset); expected_offset);
return false; return false;
...@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade ...@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
DRM_ERROR("Second argument of uniform address write must be " DRM_DEBUG("Second argument of uniform address write must be "
"a uniform.\n"); "a uniform.\n");
return false; return false;
} }
...@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader, ...@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
switch (waddr) { switch (waddr) {
case QPU_W_UNIFORMS_ADDRESS: case QPU_W_UNIFORMS_ADDRESS:
if (is_b) { if (is_b) {
DRM_ERROR("relative uniforms address change " DRM_DEBUG("relative uniforms address change "
"unsupported\n"); "unsupported\n");
return false; return false;
} }
...@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader, ...@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
/* XXX: I haven't thought about these, so don't support them /* XXX: I haven't thought about these, so don't support them
* for now. * for now.
*/ */
DRM_ERROR("Unsupported waddr %d\n", waddr); DRM_DEBUG("Unsupported waddr %d\n", waddr);
return false; return false;
case QPU_W_VPM_ADDR: case QPU_W_VPM_ADDR:
DRM_ERROR("General VPM DMA unsupported\n"); DRM_DEBUG("General VPM DMA unsupported\n");
return false; return false;
case QPU_W_VPM: case QPU_W_VPM:
...@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader, ...@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
bool ok; bool ok;
if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) { if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
DRM_ERROR("ADD and MUL both set up textures\n"); DRM_DEBUG("ADD and MUL both set up textures\n");
return false; return false;
} }
...@@ -588,7 +588,7 @@ check_branch(uint64_t inst, ...@@ -588,7 +588,7 @@ check_branch(uint64_t inst,
* there's no need for it. * there's no need for it.
*/ */
if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) { if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
DRM_ERROR("branch instruction at %d wrote a register.\n", DRM_DEBUG("branch instruction at %d wrote a register.\n",
validation_state->ip); validation_state->ip);
return false; return false;
} }
...@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader, ...@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
validated_shader->uniforms_size += 4; validated_shader->uniforms_size += 4;
if (validation_state->needs_uniform_address_update) { if (validation_state->needs_uniform_address_update) {
DRM_ERROR("Uniform read with undefined uniform " DRM_DEBUG("Uniform read with undefined uniform "
"address\n"); "address\n");
return false; return false;
} }
...@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) ...@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
continue; continue;
if (ip - last_branch < 4) { if (ip - last_branch < 4) {
DRM_ERROR("Branch at %d during delay slots\n", ip); DRM_DEBUG("Branch at %d during delay slots\n", ip);
return false; return false;
} }
last_branch = ip; last_branch = ip;
if (inst & QPU_BRANCH_REG) { if (inst & QPU_BRANCH_REG) {
DRM_ERROR("branching from register relative " DRM_DEBUG("branching from register relative "
"not supported\n"); "not supported\n");
return false; return false;
} }
if (!(inst & QPU_BRANCH_REL)) { if (!(inst & QPU_BRANCH_REL)) {
DRM_ERROR("relative branching required\n"); DRM_DEBUG("relative branching required\n");
return false; return false;
} }
...@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) ...@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
* end of the shader object. * end of the shader object.
*/ */
if (branch_imm % sizeof(inst) != 0) { if (branch_imm % sizeof(inst) != 0) {
DRM_ERROR("branch target not aligned\n"); DRM_DEBUG("branch target not aligned\n");
return false; return false;
} }
branch_target_ip = after_delay_ip + (branch_imm >> 3); branch_target_ip = after_delay_ip + (branch_imm >> 3);
if (branch_target_ip >= validation_state->max_ip) { if (branch_target_ip >= validation_state->max_ip) {
DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n", DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
ip, branch_target_ip, ip, branch_target_ip,
validation_state->max_ip); validation_state->max_ip);
return false; return false;
...@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) ...@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
* the shader. * the shader.
*/ */
if (after_delay_ip >= validation_state->max_ip) { if (after_delay_ip >= validation_state->max_ip) {
DRM_ERROR("Branch at %d continues past shader end " DRM_DEBUG("Branch at %d continues past shader end "
"(%d/%d)\n", "(%d/%d)\n",
ip, after_delay_ip, validation_state->max_ip); ip, after_delay_ip, validation_state->max_ip);
return false; return false;
...@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) ...@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
} }
if (max_branch_target > validation_state->max_ip - 3) { if (max_branch_target > validation_state->max_ip - 3) {
DRM_ERROR("Branch landed after QPU_SIG_PROG_END"); DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
return false; return false;
} }
...@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state) ...@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
return true; return true;
if (texturing_in_progress(validation_state)) { if (texturing_in_progress(validation_state)) {
DRM_ERROR("Branch target landed during TMU setup\n"); DRM_DEBUG("Branch target landed during TMU setup\n");
return false; return false;
} }
...@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) ...@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LAST_THREAD_SWITCH: case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader, if (!check_instruction_writes(validated_shader,
&validation_state)) { &validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip); DRM_DEBUG("Bad write at ip %d\n", ip);
goto fail; goto fail;
} }
...@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) ...@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
validated_shader->is_threaded = true; validated_shader->is_threaded = true;
if (ip < last_thread_switch_ip + 3) { if (ip < last_thread_switch_ip + 3) {
DRM_ERROR("Thread switch too soon after " DRM_DEBUG("Thread switch too soon after "
"last switch at ip %d\n", ip); "last switch at ip %d\n", ip);
goto fail; goto fail;
} }
...@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) ...@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_IMM: case QPU_SIG_LOAD_IMM:
if (!check_instruction_writes(validated_shader, if (!check_instruction_writes(validated_shader,
&validation_state)) { &validation_state)) {
DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
goto fail; goto fail;
} }
break; break;
...@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) ...@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
goto fail; goto fail;
if (ip < last_thread_switch_ip + 3) { if (ip < last_thread_switch_ip + 3) {
DRM_ERROR("Branch in thread switch at ip %d", DRM_DEBUG("Branch in thread switch at ip %d",
ip); ip);
goto fail; goto fail;
} }
break; break;
default: default:
DRM_ERROR("Unsupported QPU signal %d at " DRM_DEBUG("Unsupported QPU signal %d at "
"instruction %d\n", sig, ip); "instruction %d\n", sig, ip);
goto fail; goto fail;
} }
...@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) ...@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
} }
if (ip == validation_state.max_ip) { if (ip == validation_state.max_ip) {
DRM_ERROR("shader failed to terminate before " DRM_DEBUG("shader failed to terminate before "
"shader BO end at %zd\n", "shader BO end at %zd\n",
shader_obj->base.size); shader_obj->base.size);
goto fail; goto fail;
...@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) ...@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
/* Might corrupt other thread */ /* Might corrupt other thread */
if (validated_shader->is_threaded && if (validated_shader->is_threaded &&
validation_state.all_registers_used) { validation_state.all_registers_used) {
DRM_ERROR("Shader uses threading, but uses the upper " DRM_DEBUG("Shader uses threading, but uses the upper "
"half of the registers, too\n"); "half of the registers, too\n");
goto fail; goto fail;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment